from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration for a multimodal bitransformer: wraps an existing text
    config and adds the hidden size of the non-text (modal) encoder."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Share the wrapped transformer config's attribute dict so every text
        # attribute is also visible on this config.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
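# A minimal usage sketch (hedged: any config object exposing `__dict__` works;
# `BertConfig` below is only illustrative). Because the wrapper shares the
# wrapped config's attribute dict, text-config attributes stay visible:
#
#   from transformers import BertConfig
#   text_config = BertConfig(hidden_size=768)
#   mm_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)
#   assert mm_config.hidden_size == 768          # inherited from the text config
#   assert mm_config.modal_hidden_size == 2048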
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration class for YOLOS models."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
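# A brief usage sketch (attribute and class names as recovered above): the ONNX
# config advertises the model's dynamic input axes and validation tolerance.
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   onnx_config = YolosOnnxConfig(config)
#   print(onnx_config.inputs)               # pixel_values with dynamic axes
#   print(onnx_config.atol_for_validation)  # 1e-4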
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    # bit_count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # The approach is a recursive one.
    # Base case achieved when either n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # length of the sequence: 1 << n equals 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []

    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # prepend 1 to the second half, starting from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
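# A short demonstration, using the names recovered above: the 3-bit reflected
# Gray code, in which consecutive values differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code_sequence_string(3))  # ['000', '001', '011', '010', '110', '111', '101', '100']
    print(gray_code(3))                  # [0, 1, 3, 2, 6, 7, 5, 4]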
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # We verify the conversion on a COCO validation image of two cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
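# Example invocation of the conversion script above (hedged: the script file
# name, checkpoint path, and output directory are illustrative placeholders):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt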
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Edmonds-Karp maximum flow; returns the edges saturated at termination.

    Note: this mutates `graph` into the final residual graph.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    # Pass a copy so the module-level capacity matrix is preserved.
    print(mincut([row[:] for row in test_graph], source=0, sink=5))
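# For the capacity matrix above -- the classic CLRS flow-network example --
# the maximum flow from node 0 to node 5 is 23, so by the max-flow/min-cut
# theorem the cut's capacity is also 23. A hedged sanity check on fresh copies
# (mincut() consumes the matrix it is given):
if __name__ == "__main__":
    capacities = [row[:] for row in test_graph]
    cut = mincut([row[:] for row in test_graph], source=0, sink=5)
    assert sum(capacities[u][v] for u, v in cut) == 23  # edges (1, 3), (4, 3), (4, 5)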
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph with Borůvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node: int, v_node: int, weight: int):
        """Add an edge (u, v) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int):
        """Find the component a node belongs to by following parent pointers."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int):
        """Recompute every node's component once u_node is no longer its own root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int):
        """Merge the smaller of the two components into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self):
        """Compute and print a minimum spanning tree using Borůvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every component, remember its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's cheapest edge to the tree and merge components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
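# A small worked example (using the class and method names recovered above):
# on this 4-node graph the MST keeps edges 0-1, 1-2 and 2-3, total weight 6.
if __name__ == "__main__":
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 10)
    g.boruvka()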
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]


if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
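# Usage sketch (hedged: standard lazy-module behavior in transformers). Names
# registered in _import_structure are resolved only on first attribute access,
# so top-level imports stay cheap until a class is actually used:
#
#   from transformers import MgpstrConfig, MgpstrProcessor
#   config = MgpstrConfig()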
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Locate the leftmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Locate the rightmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Insert item into sorted_collection, before any existing equal items."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Insert item into sorted_collection, after any existing equal items."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    """Binary search using the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
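# Non-interactive examples of the functions above (expected values shown):
#
#   >>> binary_search([0, 5, 7, 10, 15], 15)
#   4
#   >>> bisect_left([0, 5, 7, 10, 15], 6)
#   2
#   >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4)
#   1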
from sklearn.metrics import recall_score
import datasets
snake_case__ : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
snake_case__ : int = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
snake_case__ : int = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class for Conditional DETR models."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
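# A brief usage sketch (names as recovered above): the attribute_map lets
# `hidden_size` resolve to `d_model`, and the ONNX config reports the dynamic
# axes plus a 1e-5 validation tolerance.
#
#   config = ConditionalDetrConfig(num_queries=300, d_model=256)
#   assert config.hidden_size == 256
#   onnx_config = ConditionalDetrOnnxConfig(config)
#   print(onnx_config.inputs)  # pixel_values and pixel_mask axes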
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
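# Usage sketch (hedged: the helper's exact return types vary across
# transformers versions). The config derives its stage names and aligns
# out_features with out_indices at construction time:
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage4"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   print(config.out_features, config.out_indices)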
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__lowercase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__lowercase = int(sequence[i] , 2 )
return sequence
def gray_code_sequence_string(bit_count):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # prepend 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
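A quick sanity check of the reflect-and-prefix construction (using the function names restored above): consecutive entries of the 2-bit sequence differ in exactly one bit.

assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code_sequence(2) == [0, 1, 3, 2]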
| 655
| 0
|
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=""",""")
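Note that the function consumes its input (elements are removed from `collection` as it runs), so pass a copy if the original list is still needed. A small check of the peel-min-and-max behaviour:

data = [5, 1, 4, 2, 3]
assert merge_sort(data.copy()) == [1, 2, 3, 4, 5]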
| 702
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
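The distributed branches above only execute under a multi-process launcher; a plain `python` run exercises the `DistributedType.NO` path. Assuming the script is saved as test_sync.py, typical invocations would look like this:

#   accelerate launch --num_processes 2 test_sync.py   # multi-GPU / multi-CPU branches
#   python test_sync.py                                # single-process branch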
| 655
| 0
|
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
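As a worked example of the same bottom-up DP with a reduced coin set: with coins {1, 2, 5} there are exactly 4 ways to make 5 pence (5, 2+2+1, 2+1+1+1, 1+1+1+1+1). The helper below is illustrative only, not part of the original solution:

def count_ways(pence, coins):
    # same table-filling scheme as solution(), for an arbitrary coin set
    ways = [0] * (pence + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]

assert count_ways(5, [1, 2, 5]) == 4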
| 703
|
from ....utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration wrapper for a Multimodal Bitransformer (MMBT) model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
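A minimal usage sketch (BertConfig chosen purely for illustration): the wrapper adopts the transformer config's attributes wholesale via `__dict__` sharing, then layers the multimodal fields on top. Note that because the `__dict__` is shared, attributes set on the wrapper also appear on the wrapped config.

from transformers import BertConfig

transformer_config = BertConfig()
mmbt_config = MMBTConfig(transformer_config, num_labels=2, modal_hidden_size=2048)
assert mmbt_config.hidden_size == transformer_config.hidden_size  # inherited attribute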
| 655
| 0
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
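A small non-interactive check of the relaxation (using the names restored above; note the function also prints the matrix): the shortest 0 -> 2 route detours through vertex 1 for a total weight of 3.

INF = float("inf")
example_graph = [
    [0.0, 1.0, 7.0],
    [INF, 0.0, 2.0],
    [INF, INF, 0.0],
]
dist, _ = floyd_warshall([row[:] for row in example_graph], 3)
assert dist[0][2] == 3.0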
| 704
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 655
| 0
|
from pathlib import Path
import fire
from tqdm import tqdm
def snake_case_ ( _SCREAMING_SNAKE_CASE="ro" , _SCREAMING_SNAKE_CASE="en" , _SCREAMING_SNAKE_CASE="wmt16" , _SCREAMING_SNAKE_CASE=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
__lowercase = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
__lowercase = datasets.load_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if save_dir is None:
__lowercase = F"""{dataset}-{pair}"""
__lowercase = Path(_SCREAMING_SNAKE_CASE )
save_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__lowercase = "val" if split == "validation" else split
__lowercase = save_dir.joinpath(F"""{fn}.source""" )
__lowercase = save_dir.joinpath(F"""{fn}.target""" )
__lowercase = src_path.open("w+" )
__lowercase = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowercase = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
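Because the entry point is exposed through fire, every parameter maps directly to a CLI flag. A typical invocation (script filename assumed), which writes train/val/test .source/.target files under the save dir:

#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en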
| 705
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
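A shape sketch with toy dimensions (all values illustrative): the four extra context tokens are prepended to the projected text-encoder sequence, while the CLIP embeddings are folded into a time-embedding-sized vector.

model = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=48
)
image_embeddings = torch.randn(2, 32)
prompt_embeds = torch.randn(2, 32)
text_encoder_hidden_states = torch.randn(2, 7, 32)
hidden_states, time_embeddings = model(
    image_embeddings=image_embeddings,
    prompt_embeds=prompt_embeds,
    text_encoder_hidden_states=text_encoder_hidden_states,
    do_classifier_free_guidance=False,
)
assert hidden_states.shape == (2, 4 + 7, 48)
assert time_embeddings.shape == (2, 64)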
| 655
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
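How the actors get created is not shown here; a hedged sketch of the wiring based on the RAG research-project examples (worker count and model name illustrative):

import ray

ray.init()
RemoteRetriever = ray.remote(RayRetriever)
retrieval_workers = [RemoteRetriever.remote() for _ in range(2)]
retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-token-nq", actor_handles=retrieval_workers
)
retriever.init_retrieval()  # builds the index once inside each actor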
| 706
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
    def __repr__(self) -> str:
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping each decorated function to its LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
    def __repr__(self) -> str:
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
    def __contains__(self, key: T) -> bool:
return key in self.cache
    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
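Example use of the class-level decorator (with the method names restored above): memoizing a single-argument function with an LRU bound of 100 entries.

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(100))          # 354224848179261915075, fast thanks to the cache
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)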
| 655
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : List[str] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
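An example conversion run (script filename assumed; the flags are exactly the ones declared in the argparse block above):

#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection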
| 707
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path, articles):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 655
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self : Optional[int] , lowerCamelCase : ImageInput = None , lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase : bool = True , lowerCamelCase : Union[bool, str, PaddingStrategy] = False , lowerCamelCase : Union[bool, str, TruncationStrategy] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : int = 0 , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__lowercase = self.tokenizer
__lowercase = self.tokenizer(
text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
return text_encoding
# add pixel_values
__lowercase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase )
if text is not None:
__lowercase = self.tokenizer(
text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
else:
__lowercase = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase )
return encoding_image_processor
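# Typical call pattern (illustrative, not part of the original file): calling the
# processor with both inputs, e.g. processor(images=image, text="a photo of",
# return_tensors="pt"), returns the image processor's pixel_values updated with
# the tokenizer's input_ids and attention_mask, exactly as assembled above.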
def _snake_case ( self : Union[str, Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Optional[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 708
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
'''simple docstring'''
_snake_case : int
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
snake_case__ : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError("The number of nodes should be the same as the number of coins" )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__lowercase , __lowercase = get_distrib(node.left )
__lowercase , __lowercase = get_distrib(node.right )
__lowercase = 1 - left_distrib_excess
__lowercase = 1 - right_distrib_excess
__lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
__lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
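# Worked example (hand-traced, not executed): a root holding 3 coins with two
# empty leaf children needs 2 moves -- one coin travels down each edge, after
# which every node holds exactly one coin, matching the excess bookkeeping above.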
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case__ : Dict = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = feature_size
__lowercase = sampling_rate
__lowercase = padding_value
__lowercase = kwargs.pop("padding_side" , "right" )
__lowercase = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def _snake_case ( self : Any , lowerCamelCase : Union[BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]]] , lowerCamelCase : Union[bool, str, PaddingStrategy] = True , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowercase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__lowercase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowercase = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
__lowercase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__lowercase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__lowercase = "tf"
elif is_torch_tensor(lowerCamelCase ):
__lowercase = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__lowercase = "np"
else:
raise ValueError(
f"""type of {first_element} unknown: {type(lowerCamelCase )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowercase = to_numpy(lowerCamelCase )
else:
__lowercase = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowercase = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__lowercase = []
for i in range(lowerCamelCase ):
__lowercase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowercase = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowercase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowercase = PaddingStrategy.MAX_LENGTH
__lowercase = {}
for i in range(lowerCamelCase ):
# padding
__lowercase = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowercase = []
if value.dtype is np.dtype(np.floataa ):
__lowercase = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
def _snake_case ( self : Optional[int] , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowercase = len(lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowercase = np.ones(len(lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__lowercase = max_length - len(lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (0, difference) )
__lowercase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowercase = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__lowercase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowercase = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding side: " + str(self.padding_side ) )
return processed_features
def _snake_case ( self : int , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__lowercase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = len(lowerCamelCase ) > max_length
if needs_to_be_truncated:
__lowercase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowercase = processed_features["attention_mask"][:max_length]
return processed_features
def _snake_case ( self : List[str] , lowerCamelCase : Tuple=False , lowerCamelCase : str=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__lowercase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = PaddingStrategy(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = padding
else:
__lowercase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
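# For example (illustrative): padding=True resolves to PaddingStrategy.LONGEST,
# padding="max_length" additionally requires max_length to be set, and any
# strategy other than DO_NOT_PAD requires a padding_value, as enforced above.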
| 709
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = SwinvaConfig()
__lowercase = swinva_name.split("_" )
__lowercase = name_split[1]
if "to" in name_split[3]:
__lowercase = int(name_split[3][-3:] )
else:
__lowercase = int(name_split[3] )
if "to" in name_split[2]:
__lowercase = int(name_split[2][-2:] )
else:
__lowercase = int(name_split[2][6:] )
if model_size == "tiny":
__lowercase = 9_6
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowercase = 9_6
__lowercase = (2, 2, 1_8, 2)
__lowercase = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowercase = 1_2_8
__lowercase = (2, 2, 1_8, 2)
__lowercase = (4, 8, 1_6, 3_2)
else:
__lowercase = 1_9_2
__lowercase = (2, 2, 1_8, 2)
__lowercase = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowercase = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowercase = 2_1_8_4_1
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-22k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
else:
__lowercase = 1_0_0_0
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = img_size
__lowercase = num_classes
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
return config
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
if "patch_embed.proj" in name:
__lowercase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowercase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowercase = "encoder." + name
if "attn.proj" in name:
__lowercase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowercase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowercase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowercase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowercase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowercase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__lowercase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__lowercase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__lowercase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__lowercase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__lowercase = "layernorm.weight"
if name == "norm.bias":
__lowercase = "layernorm.bias"
if "head" in name:
__lowercase = name.replace("head" , "classifier" )
else:
__lowercase = "swinv2." + name
return name
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase = key.split("." )
__lowercase = int(key_split[1] )
__lowercase = int(key_split[3] )
__lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
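# Illustrative shape check for the qkv split above: with all_head_size dim = 96,
# a fused (288, 96) qkv weight is sliced into three (96, 96) query/key/value
# matrices, and a 288-dim qkv bias into three 96-dim vectors.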
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
__lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
__lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
__lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
__lowercase = timm_model(inputs["pixel_values"] )
__lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : int = """WhisperFeatureExtractor"""
_snake_case : Tuple = """WhisperTokenizer"""
def __init__( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
super().__init__(lowerCamelCase , lowerCamelCase )
__lowercase = self.feature_extractor
__lowercase = False
def _snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Any=None , lowerCamelCase : Any=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase , language=lowerCamelCase , no_timestamps=lowerCamelCase )
def __call__( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
__lowercase = kwargs.pop("audio" , lowerCamelCase )
__lowercase = kwargs.pop("sampling_rate" , lowerCamelCase )
__lowercase = kwargs.pop("text" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
__lowercase = args[0]
__lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__lowercase = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
if text is not None:
__lowercase = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowercase = encodings["input_ids"]
return inputs
def _snake_case ( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Any , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any]="np" ):
'''simple docstring'''
return self.tokenizer.get_prompt_ids(lowerCamelCase , return_tensors=lowerCamelCase )
| 710
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
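# For example, the symbol-pair helper above maps the word ("l", "o", "w") to
# {("l", "o"), ("o", "w")}; the BPE loop below repeatedly merges the
# lowest-ranked of these pairs until no known merge remains.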
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : lowerCamelCase[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding side: " + str(self.padding_side ) )
return encoded_inputs
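# Example of the right-padding branch above (illustrative): a
# "global_attention_mask" of [1, 0, 0] padded against input_ids of length 5
# becomes [1, 0, 0, -1, -1], where -1 marks padding rather than local attention.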
| 655
| 0
|
import os
import sys
import unittest
snake_case__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
snake_case__ : Optional[int] = os.path.join(git_repo_path, """src""", """transformers""")
snake_case__ : Tuple = """
{0} = None
"""
snake_case__ : List[str] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
snake_case__ : Dict = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class _A ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
self.assertIsNone(lowerCamelCase )
__lowercase = find_backend(" if not is_tokenizers_available():" )
self.assertEqual(lowerCamelCase , "tokenizers" )
__lowercase = find_backend(" if not is_tensorflow_text_available():" )
self.assertEqual(lowerCamelCase , "tensorflow_text" )
__lowercase = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
self.assertEqual(lowerCamelCase , "sentencepiece_and_tokenizers" )
__lowercase = find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
self.assertEqual(lowerCamelCase , "sentencepiece_and_tensorflow_text" )
__lowercase = find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
self.assertEqual(lowerCamelCase , "sentencepiece_and_tokenizers_and_vision" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , lowerCamelCase )
self.assertIn("tensorflow_text" , lowerCamelCase )
self.assertIn("sentencepiece_and_tokenizers" , lowerCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertModel" , objects["tf"] )
self.assertIn("FlaxBertModel" , objects["flax"] )
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(lowerCamelCase , "\nCONSTANT = None\n" )
__lowercase = create_dummy_object("function" , "'torch'" )
self.assertEqual(
lowerCamelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
__lowercase = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
__lowercase = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
__lowercase = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , lowerCamelCase )
| 711
|
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("The length of profit and weight must be the same." )
if max_weight <= 0:
raise ValueError("max_weight must be greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit cannot be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight cannot be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
__lowercase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
__lowercase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = 0
# loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
# pick the largest remaining profit/weight ratio from the sorted list
__lowercase = sorted_profit_by_weight[length - i - 1]
__lowercase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
__lowercase = -1
# check whether this item's full weight still fits within the remaining
# capacity (max_weight - limit).
if max_weight - limit >= weight[index]:
limit += weight[index]
# Add the full profit of this item, since its entire weight fits:
# weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
# The item's weight exceeds the remaining capacity, so take only the
# remaining kgs and add the proportional profit:
# (remaining weight / weight[index]) * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
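# Worked example (hand-traced, not executed): for profits [1, 2, 3], weights
# [3, 4, 5] and max_weight 15, items are taken in ratio order 0.6, 0.5, 0.33..;
# all three fit (total weight 12), so the function returns the full profit of 6.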
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ : List[Any] = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ):
__lowercase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowercase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
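# Illustrative values: with the default scale_factor of 8, a 768x768 request
# gives 768 // 8**2 = 12 latent blocks per side, rescaled back by 8, so the
# helper returns (96, 96); non-divisible sizes are rounded up by one block.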
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : DDPMScheduler , lowerCamelCase : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase , scheduler=lowerCamelCase , movq=lowerCamelCase , )
__lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
'''simple docstring'''
if latents is None:
__lowercase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__lowercase = latents.to(lowerCamelCase )
__lowercase = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self : List[Any] , lowerCamelCase : List[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
__lowercase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Tuple , lowerCamelCase : Any=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowercase , __lowercase = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
__lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self : List[Any] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self : Any , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 100 , lowerCamelCase : float = 4.0 , lowerCamelCase : int = 1 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowercase = self._execution_device
__lowercase = guidance_scale > 1.0
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = torch.cat(lowerCamelCase , dim=0 )
__lowercase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__lowercase = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__lowercase = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
__lowercase = self.scheduler.timesteps
__lowercase = self.unet.config.in_channels
__lowercase , __lowercase = downscale_height_and_width(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
__lowercase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = {"image_embeds": image_embeds}
__lowercase = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase , __lowercase = variance_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , )[0]
# post-processing
__lowercase = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowercase = image * 0.5 + 0.5
__lowercase = image.clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 712
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """openai/whisper-base"""
_snake_case : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_snake_case : Any = """transcriber"""
_snake_case : Any = WhisperProcessor
_snake_case : Optional[int] = WhisperForConditionalGeneration
_snake_case : str = ["""audio"""]
_snake_case : Optional[int] = ["""text"""]
def _snake_case ( self : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase , return_tensors="pt" ).input_features
def _snake_case ( self : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase )
def _snake_case ( self : List[str] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
| 655
| 0
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : int , *lowerCamelCase : Any , **lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(lowerCamelCase )
def __call__( self : Optional[Any] , lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Union[str, Any] , **lowerCamelCase : Any ):
'''simple docstring'''
return {}, {}, {}
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = load_image(lowerCamelCase )
__lowercase = image.size
__lowercase = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
return model_inputs
def _snake_case ( self : int , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model(**lowerCamelCase )
return model_outputs
def _snake_case ( self : int , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = model_outputs.predicted_depth
__lowercase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=lowerCamelCase )
__lowercase = prediction.squeeze().cpu().numpy()
__lowercase = (output * 255 / np.max(lowerCamelCase )).astype("uint8" )
__lowercase = Image.fromarray(lowerCamelCase )
__lowercase = {}
__lowercase = predicted_depth
__lowercase = depth
return output_dict
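# Sketch of typical usage (the checkpoint name is an assumption, not from this
# file): pipeline("depth-estimation", model="Intel/dpt-large")("cat.jpg")
# returns a dict with the raw "predicted_depth" tensor and the PIL "depth"
# image built above.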
| 713
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__lowercase = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["prompt"]
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
if "image" in inputs:
__lowercase = inputs["image"]
else:
__lowercase = None
if "mask_image" in inputs:
__lowercase = inputs["mask_image"]
else:
__lowercase = None
if "original_image" in inputs:
__lowercase = inputs["original_image"]
else:
__lowercase = None
__lowercase , __lowercase = pipe.encode_prompt(lowerCamelCase )
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase , lowerCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = inputs["generator"]
__lowercase = inputs["num_inference_steps"]
__lowercase = inputs["output_type"]
# inputs with prompt converted to embeddings
__lowercase = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
__lowercase = image
if mask_image is not None:
__lowercase = mask_image
if original_image is not None:
__lowercase = original_image
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe(**lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase )
__lowercase = self.pipeline_class.from_pretrained(lowerCamelCase )
pipe_loaded.to(lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__lowercase = self.get_dummy_inputs(lowerCamelCase )
__lowercase = pipe_loaded(**lowerCamelCase )[0]
__lowercase = np.abs(to_np(lowerCamelCase ) - to_np(lowerCamelCase ) ).max()
self.assertLess(lowerCamelCase , 1e-4 )
| 655
| 0
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
snake_case__ : Tuple = parse(importlib.metadata.version("""torch"""))
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
__lowercase = STR_OPERATION_TO_FUNC[operation]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = parse(importlib.metadata.version(_SCREAMING_SNAKE_CASE ) )
return operation(_SCREAMING_SNAKE_CASE , parse(_SCREAMING_SNAKE_CASE ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
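# --- illustrative example (not part of the original module) -----------------
# A hedged sketch of the comparison these helpers perform: map an operator
# string to an `operator.*` function and compare parsed versions, as
# STR_OPERATION_TO_FUNC presumably does (its exact contents are not shown here).
import operator
from packaging.version import parse

str_op = {">=": operator.ge, "<": operator.lt, "==": operator.eq}
assert str_op[">="](parse("2.0.1"), parse("1.13")) is True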
| 714
|
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
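# --- illustrative example (not part of the original module) -----------------
# The class above is a Bifid-style cipher: it maps letters to Polybius-square
# coordinates, splits the rows/columns apart, then reads the digits back in
# pairs. This standalone sketch shows just the coordinate-mapping step.
POLYBIUS = ["abcde", "fghik", "lmnop", "qrstu", "vwxyz"]  # no "j" (merged into "i")

def polybius_coordinates(text: str) -> str:
    text = text.lower().replace("j", "i").replace(" ", "")
    out = []
    for row_idx, ch in ((r, c) for c in text for r, row in enumerate(POLYBIUS, start=1) if c in row):
        row = POLYBIUS[row_idx - 1]
        out.append(f"{row_idx}{row.index(ch) + 1}")
    return "".join(out)

assert polybius_coordinates("hello") == "2315313134"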
| 655
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case__ : Optional[Any] = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = ["""LayoutLMv3FeatureExtractor"""]
snake_case__ : List[str] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
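# --- illustrative example (not part of the original module) -----------------
# A minimal stand-in showing the idea behind transformers' _LazyModule (this
# is not the real implementation): attribute access triggers the submodule
# import on first use instead of at package-import time.
import importlib

class TinyLazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # invert {module: [attrs]} into {attr: module} for O(1) lookup
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self._package)
        return getattr(module, attr)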
| 715
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _A ( ctypes.Structure ):
'''simple docstring'''
_snake_case : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def snake_case_ ( ):
if os.name == "nt":
__lowercase = CursorInfo()
__lowercase = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
__lowercase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
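# --- illustrative example (not part of the original module) -----------------
# A hedged usage sketch of the ANSI escapes above on POSIX terminals:
# "\033[?25l" hides the cursor, "\033[?25h" restores it.
import sys
import time

sys.stdout.write("\033[?25l")
sys.stdout.flush()
try:
    for step in range(3):
        print(f"step {step}")
        time.sleep(0.1)
finally:
    sys.stdout.write("\033[?25h")
    sys.stdout.flush()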
| 655
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
_snake_case : ClassVar[Features] = Features({"""labels""": ClassLabel} )
_snake_case : str = "text"
_snake_case : str = "labels"
def _snake_case ( self : str , lowerCamelCase : Any ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
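# --- illustrative example (not part of the original module) -----------------
# The method above deep-copies before mutating because the dataclass is
# frozen. The same pattern with the standard library's `dataclasses.replace`:
from dataclasses import dataclass, replace

@dataclass(frozen=True)
class TinyTask:
    text_column: str = "text"
    label_column: str = "labels"

task = TinyTask()
renamed = replace(task, label_column="sentiment")  # original `task` is untouched
assert (task.label_column, renamed.label_column) == ("labels", "sentiment")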
| 716
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[Any] = """yolos"""
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=768 , lowerCamelCase : int=12 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Optional[Any]=3_072 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.02 , lowerCamelCase : Optional[Any]=1e-12 , lowerCamelCase : Optional[Any]=[512, 864] , lowerCamelCase : str=16 , lowerCamelCase : Dict=3 , lowerCamelCase : str=True , lowerCamelCase : List[Any]=100 , lowerCamelCase : Dict=True , lowerCamelCase : Dict=False , lowerCamelCase : List[str]=1 , lowerCamelCase : str=5 , lowerCamelCase : Any=2 , lowerCamelCase : str=5 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : List[Any]=0.1 , **lowerCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = version.parse("""1.11""" )
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : str ):
'''simple docstring'''
return 1e-4
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return 12
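# --- illustrative example (not part of the original module) -----------------
# A hedged sketch of what the dynamic-axes mapping above means for ONNX
# export; the Conv2d is a stand-in model, not YOLOS, and depending on the
# torch version this may additionally require the `onnx` package.
import torch

dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy = torch.randn(1, 3, 64, 64)
torch.onnx.export(
    model, dummy, "stand_in.onnx",
    input_names=["pixel_values"], dynamic_axes=dynamic_axes,
)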
| 655
| 0
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
snake_case__ : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
snake_case__ : int = """cuda""" if torch.cuda.is_available() else """cpu"""
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1_0_0 , _SCREAMING_SNAKE_CASE=" " ):
__lowercase = text.split(_SCREAMING_SNAKE_CASE )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(_SCREAMING_SNAKE_CASE ):
titles.append(title if title is not None else "" )
texts.append(_SCREAMING_SNAKE_CASE )
return {"title": titles, "text": texts}
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt" )["input_ids"]
__lowercase = ctx_encoder(input_ids.to(device=_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__lowercase = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__lowercase = dataset.map(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , num_proc=processing_args.num_proc )
# And compute the embeddings
__lowercase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_SCREAMING_SNAKE_CASE )
__lowercase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__lowercase = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
__lowercase = dataset.map(
partial(_SCREAMING_SNAKE_CASE , ctx_encoder=_SCREAMING_SNAKE_CASE , ctx_tokenizer=_SCREAMING_SNAKE_CASE ) , batched=_SCREAMING_SNAKE_CASE , batch_size=processing_args.batch_size , features=_SCREAMING_SNAKE_CASE , )
# And finally save your dataset
__lowercase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(_SCREAMING_SNAKE_CASE )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__lowercase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=_SCREAMING_SNAKE_CASE )
# And save the index
__lowercase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(_SCREAMING_SNAKE_CASE )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _A :
'''simple docstring'''
_snake_case : str = field(
default=str(Path(_lowercase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
_snake_case : Optional[str] = field(
default=_lowercase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
_snake_case : str = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
_snake_case : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
_snake_case : Optional[str] = field(
default=str(Path(_lowercase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class _A :
'''simple docstring'''
_snake_case : Optional[int] = field(
default=_lowercase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
_snake_case : int = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class _A :
'''simple docstring'''
_snake_case : int = field(
default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
_snake_case : int = field(
default=128 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
snake_case__ : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
snake_case__ : str = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
snake_case__ : List[Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
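# --- illustrative example (not part of the original script) -----------------
# A minimal standalone sketch of the FAISS HNSW step above: index random
# embeddings and run an inner-product nearest-neighbor query.
import numpy as np
import faiss

d, m = 768, 128
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
embeddings = np.random.rand(1_000, d).astype("float32")
index.add(embeddings)
scores, ids = index.search(embeddings[:1], 5)  # top-5 neighbors of the first vector
print(ids[0])  # the query vector itself (id 0) should typically rank first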
| 717
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _SCREAMING_SNAKE_CASE )
if matches:
__lowercase = float(matches[1] )
__lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowercase = 1_0_0_1
__lowercase = "imagenet-1k-id2label.json"
__lowercase = "huggingface/label-files"
__lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowercase = {int(k ) + 1: v for k, v in idalabel.items()}
__lowercase = "background"
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( ):
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowercase = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
__lowercase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowercase = model(**_SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowercase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__lowercase = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
__lowercase = "google/" + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
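# --- illustrative example (not part of the original script) -----------------
# The name-parsing step above, in isolation: extract the depth multiplier and
# input resolution from a checkpoint name like "mobilenet_v1_0.75_192".
import re

matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
depth_multiplier, image_size = float(matches[1]), int(matches[2])
assert (depth_multiplier, image_size) == (0.75, 192)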
| 655
| 0
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str=13 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=2 , lowerCamelCase : int=3 , lowerCamelCase : Optional[int]=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : List[str]="silu" , lowerCamelCase : Any=3 , lowerCamelCase : List[Any]=32 , lowerCamelCase : int=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : int=True , lowerCamelCase : int=True , lowerCamelCase : int=10 , lowerCamelCase : str=None , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = last_hidden_size
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = conv_kernel_size
__lowercase = output_stride
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowercase = MobileViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _A ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_snake_case : str = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case : Any = False
_snake_case : int = False
_snake_case : Tuple = False
_snake_case : str = False
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = MobileViTModelTester(self )
__lowercase = MobileViTConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any ):
__lowercase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case_ ( ):
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[Any] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__lowercase = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__lowercase = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__lowercase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__lowercase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
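# --- illustrative example (not part of the original tests) ------------------
# A hedged sketch of what semantic-segmentation post-processing does with the
# (batch, num_labels, H, W) logits above: resize, then argmax over classes.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 21, 32, 32)
resized = F.interpolate(logits, size=(50, 60), mode="bilinear", align_corners=False)
segmentation = resized.argmax(dim=1)[0]
assert segmentation.shape == torch.Size((50, 60))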
| 718
|
from __future__ import annotations
from typing import Any
class _A :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = num_of_nodes
__lowercase = []
__lowercase = {}
def _snake_case ( self : Dict , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowercase = self.find_component(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowercase = self.find_component(lowerCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase )
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = []
__lowercase = 0
__lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase , __lowercase , __lowercase = edge
__lowercase = self.m_component[u]
__lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowercase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def snake_case_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
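# --- illustrative example (not part of the original module) -----------------
# Hypothetical usage, assuming the pre-anonymization API in which the class
# above is `Graph` with `add_edge(u, v, w)` and `boruvka()` (names assumed):
#
#   g = Graph(4)
#   g.add_edge(0, 1, 10)
#   g.add_edge(0, 2, 6)
#   g.add_edge(0, 3, 5)
#   g.add_edge(1, 3, 15)
#   g.add_edge(2, 3, 4)
#   g.boruvka()   # expected MST: (2,3,4), (0,3,5), (0,1,10); total weight 19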
| 655
| 0
|
'''simple docstring'''
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )):
raise ValueError("longest_common_substring() takes two strings for inputs" )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = len(_SCREAMING_SNAKE_CASE )
__lowercase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
__lowercase = 0
__lowercase = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
__lowercase = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
__lowercase = i
__lowercase = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
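# --- illustrative example (not part of the original module) -----------------
# The DP above tracks, at (i, j), the length of the longest common suffix of
# texta[:i] and textb[:j]; the maximum cell gives the answer. A re-statement
# with distinct, de-anonymized parameter names:
def lcs_substring(texta: str, textb: str) -> str:
    dp = [[0] * (len(textb) + 1) for _ in range(len(texta) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(texta) + 1):
        for j in range(1, len(textb) + 1):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return texta[best_end - best_len : best_end]

assert lcs_substring("abcdxyz", "xyzabcd") == "abcd"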
| 719
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class _A ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , *lowerCamelCase : int , **lowerCamelCase : int ):
'''simple docstring'''
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
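# --- illustrative example (not part of the original module) -----------------
# The deprecation pattern above, reduced to a self-contained sketch with
# illustrative names: subclass the replacement and warn on construction.
import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor()
assert caught[0].category is FutureWarning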
| 720
|
from __future__ import annotations
import bisect
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = 0
__lowercase = len(_SCREAMING_SNAKE_CASE ) - 1
while left <= right:
__lowercase = left + (right - left) // 2
__lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowercase = midpoint - 1
else:
__lowercase = midpoint + 1
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item:
return index
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if right < left:
return None
__lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 )
else:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
snake_case__ : Any = sorted(int(item) for item in user_input.split(""","""))
snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n"""))
snake_case__ : List[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
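# --- illustrative example (not part of the original module) -----------------
# Cross-checking the bisect_left-based lookup idea above against the standard
# library: find an item's index in a sorted list, or None if absent.
import bisect

data = [0, 5, 7, 10, 15]
i = bisect.bisect_left(data, 7)
assert (i if i < len(data) and data[i] == 7 else None) == 2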
| 655
| 0
|
from collections import deque
class _A :
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = process_name # process name
__lowercase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__lowercase = arrival_time
__lowercase = burst_time # remaining burst time
__lowercase = 0 # total time of the process wait in ready queue
__lowercase = 0 # time from arrival time to completion time
class _A :
'''simple docstring'''
def __init__( self : Any , lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : deque[Process] , lowerCamelCase : int , ):
'''simple docstring'''
__lowercase = number_of_queues
# time slice of queues that round robin algorithm applied
__lowercase = time_slices
# unfinished process is in this ready_queue
__lowercase = queue
# current time
__lowercase = current_time
# finished process is in this sequence queue
__lowercase = deque()
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _snake_case ( self : Any , lowerCamelCase : list[Process] ):
'''simple docstring'''
__lowercase = []
for i in range(len(lowerCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _snake_case ( self : Optional[int] , lowerCamelCase : list[Process] ):
'''simple docstring'''
__lowercase = []
for i in range(len(lowerCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _snake_case ( self : Union[str, Any] , lowerCamelCase : list[Process] ):
'''simple docstring'''
__lowercase = []
for i in range(len(lowerCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _snake_case ( self : str , lowerCamelCase : deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def _snake_case ( self : int , lowerCamelCase : Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _snake_case ( self : List[Any] , lowerCamelCase : deque[Process] ):
'''simple docstring'''
__lowercase = deque() # sequence deque of finished process
while len(lowerCamelCase ) != 0:
__lowercase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__lowercase = 0
# set the process's turnaround time because it is finished
__lowercase = self.current_time - cp.arrival_time
# set the completion time
__lowercase = self.current_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase )
self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _snake_case ( self : str , lowerCamelCase : deque[Process] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCamelCase ) ):
__lowercase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__lowercase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__lowercase = 0
# set the finish time
__lowercase = self.current_time
# update the process' turnaround time because it is finished
__lowercase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase )
self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
__lowercase , __lowercase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case__ : List[Any] = Process("""P1""", 0, 53)
snake_case__ : List[str] = Process("""P2""", 0, 17)
snake_case__ : List[Any] = Process("""P3""", 0, 68)
snake_case__ : Union[str, Any] = Process("""P4""", 0, 24)
snake_case__ : Union[str, Any] = 3
snake_case__ : Union[str, Any] = [17, 25]
snake_case__ : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
snake_case__ : Optional[Any] = Process("""P1""", 0, 53)
snake_case__ : Dict = Process("""P2""", 0, 17)
snake_case__ : Dict = Process("""P3""", 0, 68)
snake_case__ : int = Process("""P4""", 0, 24)
snake_case__ : Union[str, Any] = 3
snake_case__ : Optional[Any] = [17, 25]
snake_case__ : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
snake_case__ : List[str] = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case__ : List[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
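# --- illustrative example (not part of the original module) -----------------
# A hedged sketch of the round-robin accounting above: a process with burst
# time b under time slice q is preempted ceil(b / q) - 1 times before it
# finishes in that queue.
import math

burst_time, time_slice = 53, 17
preemptions = math.ceil(burst_time / time_slice) - 1
assert preemptions == 3  # runs slices of 17, 17, 17, then the remaining 2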
| 721
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Dict = """conditional_detr"""
_snake_case : Union[str, Any] = ["""past_key_values"""]
_snake_case : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[int]=300 , lowerCamelCase : List[Any]=6 , lowerCamelCase : str=2_048 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=6 , lowerCamelCase : Any=2_048 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : List[Any]=True , lowerCamelCase : str="relu" , lowerCamelCase : int=256 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=1.0 , lowerCamelCase : Tuple=False , lowerCamelCase : List[str]="sine" , lowerCamelCase : List[Any]="resnet50" , lowerCamelCase : Any=True , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : str=2 , lowerCamelCase : Dict=1 , lowerCamelCase : List[str]=1 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.25 , **lowerCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowercase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = backbone_config.get("model_type" )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(lowerCamelCase )
__lowercase = use_timm_backbone
__lowercase = backbone_config
__lowercase = num_channels
__lowercase = num_queries
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = encoder_layers
__lowercase = auxiliary_loss
__lowercase = position_embedding_type
__lowercase = backbone
__lowercase = use_pretrained_backbone
__lowercase = dilation
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = mask_loss_coefficient
__lowercase = dice_loss_coefficient
__lowercase = cls_loss_coefficient
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _snake_case ( self : str ):
'''simple docstring'''
return self.d_model
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Any = version.parse("""1.11""" )
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return 1e-5
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return 12
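# --- illustrative example (not part of the original module) -----------------
# The nested-config serialization pattern used in to_dict() above, as a
# self-contained sketch with illustrative names:
import copy

class BackboneCfg:
    model_type = "resnet"
    def to_dict(self):
        return {"model_type": self.model_type}

class DetectorCfg:
    model_type = "conditional_detr_like"
    def __init__(self):
        self.backbone_config = BackboneCfg()
        self.d_model = 256
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

assert DetectorCfg().to_dict()["backbone_config"] == {"model_type": "resnet"}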
| 655
| 0
|
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( __magic_name__ = "AAPL" )-> str:
"""simple docstring"""
snake_case_ : int = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
snake_case_ : Dict = BeautifulSoup(requests.get(__magic_name__ ).text ,"html.parser" )
snake_case_ : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" ,class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
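# --- illustrative example (not part of the original script) -----------------
# An offline sketch of the scraping step above, run against a static HTML
# snippet instead of a live Yahoo Finance page (the `bsa` import above is the
# anonymized form of `bs4`):
from bs4 import BeautifulSoup

html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>172.50</span></div>'
soup = BeautifulSoup(html, "html.parser")
price = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").text
assert price == "172.50"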
| 656
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
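# Minimal usage sketch, assuming this module is accelerate's launcher module
# (so the first function above corresponds to the public `notebook_launcher`).
# The toy training function and its arguments are illustrative.
if __name__ == "__main__":
    from accelerate import notebook_launcher

    def toy_training_function(lr, num_epochs):
        # real code would build the model/dataloaders and an Accelerator() here
        print(f"training with lr={lr} for {num_epochs} epochs")

    notebook_launcher(toy_training_function, args=(1e-3, 3), num_processes=2)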
| 656
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCamelCase : Union[str, Any] = 250004
__lowerCamelCase : Dict = 250020
@require_sentencepiece
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MBartTokenizer
a__ = MBartTokenizerFast
a__ = True
a__ = True
def _A ( self :Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : int = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Tuple = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : Optional[int] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case_ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Any = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : str = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : Tuple = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Optional[Any] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : Any = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/mbart-large-en-ro'''
a__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
a__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
a__ = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def _A ( cls :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
snake_case_ : int = 1
return cls
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250_020 )
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
snake_case_ : int = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
snake_case_ : Any = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
snake_case_ : Any = 10
snake_case_ : str = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :str ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250_026, 250_001] )
def _A ( self :Tuple ) -> int:
'''simple docstring'''
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def _A ( self :str ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Optional[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
snake_case_ : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case_ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt" )
snake_case_ : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt" )
snake_case_ : Optional[Any] = targets["input_ids"]
snake_case_ : int = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3_034, 2, 250_004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} , )
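# Standalone sketch of the translation-style encoding the tests above exercise
# (downloads the checkpoint; illustrative only):
if __name__ == "__main__":
    from transformers import MBartTokenizer as _DemoMBartTokenizer

    demo_tok = _DemoMBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    demo_batch = demo_tok(
        [" UN Chief Says There Is No Military Solution in Syria"],
        text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    )
    # source ids end with </s> (2) followed by the en_XX language code (250004)
    print(demo_batch["input_ids"][0][-2:])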
| 656
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
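# Usage sketch. In this dump every method was renamed to `_A` (with duplicate
# parameter names), so the classes above are not usable as written; the sketch
# below assumes the upstream method names (add_pair / dfs / bfs) of the graph
# module this file mirrors, and the undirected class, which is the second and
# surviving binding of the name `A_`.
if __name__ == "__main__":
    demo_graph = A_()
    demo_graph.add_pair(1, 2)   # upstream name; hypothetical here
    demo_graph.add_pair(2, 3)
    demo_graph.add_pair(3, 1)
    print(demo_graph.dfs(1))    # e.g. [1, 2, 3]
    print(demo_graph.bfs(1))    # e.g. [1, 2, 3]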
| 656
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''wav2vec2'''
def __init__( self :int , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Any=768 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Dict=3_072 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Tuple=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=1E-5 , lowerCAmelCase__ :List[str]="group" , lowerCAmelCase__ :Tuple="gelu" , lowerCAmelCase__ :str=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase__ :List[Any]=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ :List[Any]=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=128 , lowerCAmelCase__ :List[str]=16 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=0.0_5 , lowerCAmelCase__ :Tuple=10 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :Dict=10 , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=320 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Optional[Any]=100 , lowerCAmelCase__ :List[Any]=256 , lowerCAmelCase__ :int=256 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Any="sum" , lowerCAmelCase__ :str=False , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :int=256 , lowerCAmelCase__ :Dict=(512, 512, 512, 512, 1_500) , lowerCAmelCase__ :List[str]=(5, 3, 3, 1, 1) , lowerCAmelCase__ :Tuple=(1, 2, 3, 1, 1) , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :Any=0 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Any=None , **lowerCAmelCase__ :Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
snake_case_ : int = hidden_size
snake_case_ : List[str] = feat_extract_norm
snake_case_ : List[Any] = feat_extract_activation
snake_case_ : str = list(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = list(lowerCAmelCase__ )
snake_case_ : Tuple = list(lowerCAmelCase__ )
snake_case_ : List[Any] = conv_bias
snake_case_ : List[Any] = num_conv_pos_embeddings
snake_case_ : List[Any] = num_conv_pos_embedding_groups
snake_case_ : Tuple = len(self.conv_dim )
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Any = num_attention_heads
snake_case_ : int = hidden_dropout
snake_case_ : List[str] = attention_dropout
snake_case_ : List[Any] = activation_dropout
snake_case_ : int = feat_proj_dropout
snake_case_ : Tuple = final_dropout
snake_case_ : int = layerdrop
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : Any = initializer_range
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Any = do_stable_layer_norm
snake_case_ : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : Optional[Any] = apply_spec_augment
snake_case_ : str = mask_time_prob
snake_case_ : Any = mask_time_length
snake_case_ : Dict = mask_time_min_masks
snake_case_ : Optional[int] = mask_feature_prob
snake_case_ : List[str] = mask_feature_length
snake_case_ : Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ : Optional[int] = num_codevectors_per_group
snake_case_ : Union[str, Any] = num_codevector_groups
snake_case_ : Optional[int] = contrastive_logits_temperature
snake_case_ : int = feat_quantizer_dropout
snake_case_ : List[Any] = num_negatives
snake_case_ : List[Any] = codevector_dim
snake_case_ : str = proj_codevector_dim
snake_case_ : Dict = diversity_loss_weight
# ctc loss
snake_case_ : str = ctc_loss_reduction
snake_case_ : Union[str, Any] = ctc_zero_infinity
# adapter
snake_case_ : List[str] = add_adapter
snake_case_ : Optional[Any] = adapter_kernel_size
snake_case_ : int = adapter_stride
snake_case_ : List[str] = num_adapter_layers
snake_case_ : List[str] = output_hidden_size or hidden_size
snake_case_ : str = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ : int = list(lowerCAmelCase__ )
snake_case_ : Dict = list(lowerCAmelCase__ )
snake_case_ : Optional[int] = list(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = xvector_output_dim
@property
def _A ( self :str ) -> Dict:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
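# Quick sketch, assuming the obfuscated base class `a_` is PretrainedConfig as
# upstream: with default arguments the config mirrors
# facebook/wav2vec2-base-960h, and the property above (upstream name
# `inputs_to_logits_ratio`, surviving here as `_A`) is the total stride of the
# convolutional feature encoder.
if __name__ == "__main__":
    demo_cfg = A_()
    print(demo_cfg.hidden_size)  # 768
    print(demo_cfg._A)           # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per logit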
| 656
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.md5(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
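# Standalone sketch (illustrative names, not from the original script) of the
# exact-dedup idea used above: hash the whitespace-normalized content and keep
# one example per hash.
def toy_exact_dedup(contents):
    import hashlib
    import re

    seen, kept = set(), []
    for text in contents:
        digest = hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            kept.append(text)
    return kept

# toy_exact_dedup(["a = 1", "a=1", "b = 2"]) -> ["a = 1", "b = 2"]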
| 656
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A_ :
"""simple docstring"""
a__ = PegasusConfig
a__ = {}
a__ = '''gelu'''
def __init__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str]=13 , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Optional[Any]=99 , lowerCAmelCase__ :List[Any]=32 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[Any]=4 , lowerCAmelCase__ :str=37 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Optional[Any]=40 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Union[str, Any]=0 , ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = parent
snake_case_ : Any = batch_size
snake_case_ : Optional[int] = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Tuple = use_labels
snake_case_ : Optional[int] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[Any] = eos_token_id
snake_case_ : str = pad_token_id
snake_case_ : List[Any] = bos_token_id
def _A ( self :List[Any] ) -> int:
'''simple docstring'''
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : Any = prepare_pegasus_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) -> str:
'''simple docstring'''
snake_case_ : Tuple = TFPegasusModel(config=lowerCAmelCase__ ).get_decoder()
snake_case_ : Optional[int] = inputs_dict["input_ids"]
snake_case_ : Tuple = input_ids[:1, :]
snake_case_ : Dict = inputs_dict["attention_mask"][:1, :]
snake_case_ : List[Any] = inputs_dict["head_mask"]
snake_case_ : Optional[Any] = 1
# first forward pass
snake_case_ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
snake_case_, snake_case_ : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
snake_case_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
snake_case_ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=None ,__magic_name__=None ,__magic_name__=None ,__magic_name__=None ,__magic_name__=None ,)-> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
snake_case_ : Tuple = tf.cast(tf.math.not_equal(__magic_name__ ,config.pad_token_id ) ,tf.int8 )
if decoder_attention_mask is None:
snake_case_ : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.int8 ),
] ,axis=-1 ,)
if head_mask is None:
snake_case_ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a__ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ = True
a__ = False
a__ = False
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = TFPegasusModelTester(self )
snake_case_ : int = ConfigTester(self , config_class=lowerCAmelCase__ )
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
a__ = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a__ = '''google/pegasus-xsum'''
@cached_property
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : List[str] = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def _A ( self :Union[str, Any] , **lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.translate_src_text(**lowerCAmelCase__ )
assert self.expected_text == generated_words
def _A ( self :str , **lowerCAmelCase__ :Dict ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer(self.src_text , **lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="tf" )
snake_case_ : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase__ , )
snake_case_ : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase__ )
return generated_words
@slow
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
self._assert_generated_batch_equal_expected()
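# Standalone summarization sketch mirroring the integration test above
# (downloads the checkpoint and needs TensorFlow; illustrative only):
if __name__ == "__main__":
    from transformers import AutoTokenizer as _DemoTok, TFAutoModelForSeq2SeqLM as _DemoModel

    demo_tok = _DemoTok.from_pretrained("google/pegasus-xsum")
    demo_model = _DemoModel.from_pretrained("google/pegasus-xsum")
    demo_inputs = demo_tok(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        padding=True,
        return_tensors="tf",
    )
    demo_ids = demo_model.generate(
        demo_inputs.input_ids, attention_mask=demo_inputs.attention_mask, num_beams=2
    )
    print(demo_tok.batch_decode(demo_ids, skip_special_tokens=True))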
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
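# Sketch of the broader `prepare` pattern the test above relies on: model and
# optimizer are wrapped together, after which the wrapped optimizer should
# still be picklable. Illustrative, CPU-only.
if __name__ == "__main__":
    demo_model = torch.nn.Linear(10, 10)
    demo_opt = torch.optim.SGD(demo_model.parameters(), lr=0.1)
    demo_accelerator = Accelerator()
    demo_model, demo_opt = demo_accelerator.prepare(demo_model, demo_opt)
    pickle.dumps(demo_opt)  # should not raise
    AcceleratorState._reset_state()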
| 656
| 1
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCamelCase : List[str] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : Any = '''cpu'''
__lowerCamelCase : Union[str, Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCamelCase : Union[str, Any] = '''path-to-your-trained-model'''
__lowerCamelCase : int = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCamelCase : List[str] = pipe.to(device)
# to channels last
__lowerCamelCase : List[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCamelCase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCamelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCamelCase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCamelCase : int = torch.randn(2, 4, 64, 64)
__lowerCamelCase : List[str] = torch.rand(1) * 999
__lowerCamelCase : Tuple = torch.randn(2, 77, 768)
__lowerCamelCase : List[str] = (sample, timestep, encoder_hidden_status)
try:
__lowerCamelCase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
__lowerCamelCase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
__lowerCamelCase : Union[str, Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
__lowerCamelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
__lowerCamelCase : Optional[Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
__lowerCamelCase : List[str] = 666
__lowerCamelCase : Tuple = torch.Generator(device).manual_seed(seed)
__lowerCamelCase : Any = {'''generator''': generator}
if args.steps is not None:
__lowerCamelCase : str = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
__lowerCamelCase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
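# Optional latency check once the optimized pipeline above has warmed up
# (illustrative; absolute numbers depend entirely on the CPU):
import time

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    t0 = time.time()
    pipe(prompt, **generate_kwargs)
    print(f"second-run latency: {time.time() - t0:.2f}s")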
| 656
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Optional[Any] = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
| 656
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
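        # Transpose the references: one list of refs per prediction becomes one
        # stream per reference index, e.g. [[r1a, r1b], [r2a, r2b]] turns into
        # [[r1a, r2a], [r1b, r2b]], which is the layout sacrebleu's TER expects.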
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A_ (a_ ):
"""simple docstring"""
a__ = '''dandelin/vilt-b32-finetuned-vqa'''
a__ = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
a__ = '''image_qa'''
a__ = AutoProcessor
a__ = AutoModelForVisualQuestionAnswering
a__ = ['''image''', '''text''']
a__ = ['''text''']
def __init__( self :int , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :int , lowerCAmelCase__ :"Image" , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return self.pre_processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> Dict:
'''simple docstring'''
with torch.no_grad():
return self.model(**lowerCAmelCase__ ).logits
def _A ( self :int , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
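    # The method above picks the highest-scoring answer id from the VQA logits
    # and maps it back to its text label through the model config's
    # id-to-label table.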
| 656
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
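# A note on the fixture above: the first two files repeat the same token ("a "),
# so their MinHash Jaccard similarity is close to 1.0 and, at the 0.85 threshold
# used below, they should land in one duplicate cluster; "b " * 7 should not.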
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 656
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
| 1
|
'''simple docstring'''
def hexagonal_numbers ( length )-> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length ,int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
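    # Expected output of the two calls above, since each term is n * (2n - 1):
    #   [0, 1, 6, 15, 28]
    #   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]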
| 656
|
'''simple docstring'''
def hexagonal_numbers ( length )-> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length ,int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 656
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
| 656
| 1
|
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"-m" ,"--pretrained_model_name_or_path" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Path to pretrained model or model identifier from huggingface.co/models." ,)
parser.add_argument(
"-c" ,"--caption" ,type=__magic_name__ ,default="robotic cat with wings" ,help="Text used to generate images." ,)
parser.add_argument(
"-n" ,"--images_num" ,type=__magic_name__ ,default=4 ,help="How much images to generate." ,)
parser.add_argument(
"-s" ,"--seed" ,type=__magic_name__ ,default=42 ,help="Seed for random process." ,)
parser.add_argument(
"-ci" ,"--cuda_id" ,type=__magic_name__ ,default=0 ,help="cuda_id." ,)
snake_case_ : Union[str, Any] = parser.parse_args()
return args
def image_grid ( imgs ,rows ,cols )-> Image.Image:
    """simple docstring"""
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns is not correct." )
    w, h = imgs[0].size
    grid = Image.new("RGB" ,size=(cols * w, rows * h) )
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img ,box=(i % cols * w, i // cols * h) )
    return grid
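# A minimal usage sketch (hypothetical values): image_grid(images, rows=2, cols=2)
# pastes four equally sized PIL images into a single 2x2 contact sheet.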
def generate_images ( pipeline ,prompt="robotic cat with wings" ,guidance_scale=7.5 ,num_inference_steps=50 ,num_images_per_prompt=1 ,seed=42 ,)-> Optional[Any]:
    """simple docstring"""
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt ,guidance_scale=guidance_scale ,num_inference_steps=num_inference_steps ,generator=generator ,num_images_per_prompt=num_images_per_prompt ,).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images ,rows=_rows ,cols=num_images_per_prompt // _rows )
    return grid, images
__lowerCamelCase : Dict = parse_args()
# Load models and create wrapper for stable diffusion
__lowerCamelCase : Any = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__lowerCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__lowerCamelCase : List[str] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__lowerCamelCase : str = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__lowerCamelCase : Any = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__lowerCamelCase : List[str] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__lowerCamelCase : str = unet.to(torch.device('''cuda''', args.cuda_id))
__lowerCamelCase : int = pipeline.to(unet.device)
__lowerCamelCase , __lowerCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__lowerCamelCase : Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 656
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
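        # scipy.stats.spearmanr returns a (correlation, pvalue) pair, so
        # results[0] is the coefficient and results[1] is the two-sided p-value.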
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase : str = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
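# Illustrative usage sketch for the tokenizer exercised above. The checkpoint name is
# taken from the test class; running this downloads the real model files, so treat it
# as a sketch rather than part of the suite:
#
#     tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # model_inputs.input_ids begins with the source language code token and ends with EOS (id 2)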
| 656
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    '''encoder.version''',
    '''encoder.layers.*.norm_k.weight''',
    '''encoder.layers.*.norm_k.bias''',
    '''decoder.version''',
    '''decoder.layers.*.norm_k.weight''',
    '''decoder.layers.*.norm_k.bias''',
    '''decoder.pos_emb.pe_k''',
    '''speech_encoder_prenet.embed_positions._float_tensor''',
    '''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''speech_decoder_prenet.*''',
    '''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    '''encoder.proj''',
    '''speech_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    """simple docstring"""
    hf_pointer = hf_model
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"], model, task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
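# Example invocation sketch (the script filename and all paths here are hypothetical):
#
#     python convert_speecht5_checkpoint.py --task t2s \
#         --checkpoint_path ./speecht5_tts.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts_hf
#
# Note that --vocab_path must be provided in practice: the processor above is always
# built from `tokenizer`, which is only defined when a vocab path is given.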
| 656
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    for i in range(1, opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path, "rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops, sep="\n" )
    else:
        print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
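# Example run sketch (script and model paths are hypothetical; the script must be run
# from the repo root so that utils/tf_ops/onnx.json resolves):
#
#     python check_tf_ops.py --saved_model_path ./my_model/saved_model.pb --opset 12 --strict
#
# With --strict, any graph op missing from the opset table raises instead of just
# being printed.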
| 656
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A_ (OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.7_5,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def _A ( self :Dict ) -> str:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def _A ( self :str ) -> List[Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _A ( self :Tuple ) -> List[str]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _A ( self :Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _A ( self :Optional[Any] ) -> int:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _A ( self :Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def _A ( self :Dict ) -> Dict:
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def _A ( self :Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
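# Standalone usage sketch for the pipeline exercised above (CPU provider; the tiny
# test checkpoint produces random-quality images and is only meant for smoke tests;
# init_image stands for any 128x128 RGB image tensor):
#
#     pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider")
#     out = pipe(prompt="A painting of a squirrel eating a burger", image=init_image,
#                num_inference_steps=3, output_type="np")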
| 656
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        '''simple docstring'''
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
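# Usage sketch: this builder backs the "csv" loading path of the datasets library
# (file name below is hypothetical). Extra keyword arguments become CsvConfig fields
# and flow into pandas.read_csv via pd_read_csv_kwargs above:
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files="data.csv", delimiter=";")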
| 656
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''upernet'''
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.0_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
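# Usage sketch (in the published transformers API this class is UperNetConfig):
#     config = A_()                     # defaults to a ResNet backbone
#     d = config.to_dict()
#     assert d["backbone_config"]["model_type"] == "resnet"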
| 656
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
    def _A ( self :Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def _A ( self :int ) -> List[str]:
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
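# Round-trip sketch against the character-level vocab written in setUp above
# (tmpdir stands for the temporary directory used there):
#     tokenizer = MgpstrTokenizer.from_pretrained(tmpdir)
#     ids = tokenizer("tester", add_special_tokens=False).input_ids   # one id per character
#     assert tokenizer.decode(ids).replace(" ", "") == "tester"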
| 656
| 1
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class A_ :
"""simple docstring"""
    def __init__( self , start , end , val , left=None , right=None ):
        '''simple docstring'''
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        '''simple docstring'''
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class A_ :
"""simple docstring"""
    def __init__( self , collection: Sequence , function ):
        '''simple docstring'''
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        '''simple docstring'''
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        '''simple docstring'''
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        '''simple docstring'''
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        '''simple docstring'''
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        '''simple docstring'''
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        '''simple docstring'''
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
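    # Worked check for the first iteration (fn = operator.add): after update(1, 5)
    # the backing data is [2, 5, 5, 3, 4], so query_range(1, 3) == 5 + 5 + 3 == 13,
    # matching the inline comments above.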
| 656
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a , input_b )-> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset , value_array )-> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a , input_b )-> float:
    """simple docstring"""
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
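# Usage sketch: each query vector is matched to its nearest dataset row by
# Euclidean distance.
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.0, 1.0]])
#     similarity_search(dataset, value_array)   # -> [[[0.0, 0.0], 1.0]]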
| 656
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''swin2sr'''
a__ = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
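# Usage sketch (in the published transformers API this class is Swin2SRConfig;
# the override values below are illustrative assumptions):
#     config = A_(upscale=4, upsampler="pixelshuffledirect")
#     assert config.num_layers == 6   # derived from len(depths) above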
| 656
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path ,tgt_path ,save_path=None ,**kwargs )-> Optional[Any]:
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns ,tgt_lns ,**kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
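# Example run sketch (script and file names are hypothetical; extra flags are
# forwarded to calculate_rouge as keyword arguments via fire):
#     python rouge_cli.py preds.txt targets.txt --save_path rouge.json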
| 656
| 1
|
'''simple docstring'''
def reverse_long_words(sentence: str )-> str:
    """simple docstring"""
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
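    # Worked example: words longer than 4 characters are reversed in place, so
    # reverse_long_words("Hey wollef sroirraw") -> "Hey fellow warriors".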
| 656
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
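# at this point rename_keys maps every remaining original checkpoint key to its Hugging Face counterpart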
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
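# the fused in_proj matrix stacks q, k and v along dim 0 (shape (3 * 256, 256) for the 256-dim hidden size), so it is sliced into thirds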
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
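# Example invocation (a sketch; the script filename and output path are hypothetical):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50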
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list:
"""simple docstring"""
snake_case_ : Any = len(__magic_name__ )
snake_case_ : int = []
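# slide the pattern over the text one position at a time; worst case O(len(s) * len(pattern)) comparisons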
for i in range(len(__magic_name__ ) - pat_len + 1 ):
snake_case_ : List[str] = True
for j in range(__magic_name__ ):
if s[i + j] != pattern[j]:
snake_case_ : List[Any] = False
break
if match_found:
position.append(__magic_name__ )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
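# the call above finds "ABC" at 0-indexed positions [4, 10, 18]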
| 656
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
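# a stand-in for the safety checker's feature extractor: any call returns an object exposing pixel_values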
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
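# floats_tensor values lie in [0, 1), so this shifts them into [0.5, 1.0)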
# make sure the PNDM scheduler skips the PRK steps here
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure the PNDM scheduler skips the PRK steps here
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCamelCase : int = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Tuple=1 ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = tokenizer
snake_case_ : Union[str, Any] = dataset
snake_case_ : str = len(lowerCAmelCase__ ) if n_tasks is None else n_tasks
snake_case_ : Any = n_copies
def __iter__( self :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
snake_case_ : List[Any] = self.tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = start_length
snake_case_ : str = eof_strings
snake_case_ : Optional[int] = tokenizer
def __call__( self :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
snake_case_ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCAmelCase__ )
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : List[Any] = re.split("(%s)" % "|".join(__magic_name__ ) ,__magic_name__ )
# drop the trailing stop string and everything after it
return "".join(string_list[:-2] )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=20 ,**__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Dict = defaultdict(__magic_name__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__magic_name__ ) ):
with torch.no_grad():
snake_case_ : List[str] = batch["ids"].shape[-1]
snake_case_ : Any = accelerator.unwrap_model(__magic_name__ ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] ,num_return_sequences=__magic_name__ ,**__magic_name__ )
# each task is generated batch_size times
snake_case_ : Tuple = batch["task_id"].repeat(__magic_name__ )
snake_case_ : Any = accelerator.pad_across_processes(
__magic_name__ ,dim=1 ,pad_index=tokenizer.pad_token_id )
snake_case_, snake_case_ : Dict = accelerator.gather((generated_tokens, generated_tasks) )
snake_case_ : List[Any] = generated_tokens.cpu().numpy()
snake_case_ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__magic_name__ ,__magic_name__ ):
gen_token_dict[task].append(__magic_name__ )
snake_case_ : List[str] = [[] for _ in range(__magic_name__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case_ : List[Any] = tokenizer.decode(__magic_name__ ,skip_special_tokens=__magic_name__ ,clean_up_tokenization_spaces=__magic_name__ )
code_gens[task].append(remove_last_block(__magic_name__ ) )
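# code_gens[i] now holds the decoded, truncated completions generated for task i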
return code_gens
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser(__magic_name__ )
snake_case_ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case_ : int = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case_ : Optional[Any] = "false"
if args.num_workers is None:
snake_case_ : Tuple = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case_ : Any = Accelerator()
set_seed(args.seed ,device_specific=__magic_name__ )
# Load model and tokenizer
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case_ : Optional[int] = tokenizer.eos_token
snake_case_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case_ : List[str] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 ,__magic_name__ ,__magic_name__ )] ),
}
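# EndOfFunctionCriteria halts generation once every sequence contains one of EOF_STRINGS after the prompt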
# Load evaluation dataset and metric
snake_case_ : Optional[int] = load_dataset("openai_humaneval" )
snake_case_ : int = load_metric("code_eval" )
snake_case_ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
snake_case_ : int = args.n_samples // args.batch_size
snake_case_ : Any = TokenizedDataset(__magic_name__ ,human_eval["test"] ,n_copies=__magic_name__ ,n_tasks=__magic_name__ )
# note: args.batch_size is actually num_return_sequences, not the dataloader batch size
snake_case_ : Any = DataLoader(__magic_name__ ,batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case_ : List[Any] = code_eval_metric.compute(references=[""] ,predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
snake_case_, snake_case_ : str = accelerator.prepare(__magic_name__ ,__magic_name__ )
snake_case_ : str = complete_code(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,n_tasks=__magic_name__ ,batch_size=args.batch_size ,**__magic_name__ ,)
if accelerator.is_main_process:
snake_case_ : List[Any] = []
for task in tqdm(range(__magic_name__ ) ):
snake_case_ : Dict = human_eval["test"][task]["test"]
snake_case_ : Dict = F'''check({human_eval['test'][task]['entry_point']})'''
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
snake_case_, snake_case_ : Tuple = code_eval_metric.compute(
references=__magic_name__ ,predictions=__magic_name__ ,num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file ,"w" ) as fp:
json.dump(__magic_name__ ,__magic_name__ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
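# Example (a sketch; the launcher script name is hypothetical):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot --batch_size 10 --n_samples 200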
| 656
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 656
| 1
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int = 13 , lowerCAmelCase__ :int = 64 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 128 , lowerCAmelCase__ :List[str]=[16, 32, 64, 128] , lowerCAmelCase__ :int = 7 , lowerCAmelCase__ :int = 4 , lowerCAmelCase__ :int = 37 , lowerCAmelCase__ :str = "gelu" , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :int = 10 , lowerCAmelCase__ :float = 0.0_2 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 128 , lowerCAmelCase__ :List[int] = [2, 2, 2, 2] , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 2 , ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = parent
snake_case_ : Dict = batch_size
snake_case_ : Dict = image_size
snake_case_ : List[str] = patch_size
snake_case_ : Tuple = num_channels
snake_case_ : List[Any] = is_training
snake_case_ : Union[str, Any] = use_labels
snake_case_ : Tuple = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : Tuple = intermediate_size
snake_case_ : Tuple = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : Optional[int] = initializer_range
snake_case_ : int = encoder_stride
snake_case_ : Dict = num_attention_outputs
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : str = embed_dim + 1
snake_case_ : Optional[int] = resolution
snake_case_ : List[Any] = depths
snake_case_ : Union[str, Any] = hidden_sizes
snake_case_ : List[str] = dim
snake_case_ : int = mlp_expansion_ratio
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Tuple = self.get_config()
return config, pixel_values, labels
def _A ( self :int ) -> int:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = TFEfficientFormerModel(config=lowerCAmelCase__ )
snake_case_ : Tuple = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.type_sequence_label_size
snake_case_ : str = TFEfficientFormerForImageClassification(lowerCAmelCase__ )
snake_case_ : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : str = 1
snake_case_ : Tuple = TFEfficientFormerForImageClassification(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A ( self :Any ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ : Union[str, Any] = config_and_inputs
snake_case_ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = TFEfficientFormerModelTester(self )
snake_case_ : Union[str, Any] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
snake_case_, snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[Any] = [*signature.parameters.keys()]
snake_case_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _A ( self :Dict ) -> Any:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ):
snake_case_ : Dict = model_class(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
snake_case_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ : List[str] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
snake_case_ : Dict = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
snake_case_ : List[Any] = seq_length * self.model_tester.chunk_length
else:
snake_case_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case_ : List[str] = outputs.decoder_hidden_states
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
snake_case_ : Any = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Any = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :str=False ) -> Dict:
'''simple docstring'''
snake_case_ : int = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _A ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = TFEfficientFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : Tuple = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Tuple = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : List[str] = getattr(self.model_tester , "key_length" , lowerCAmelCase__ )
snake_case_ : Optional[Any] = getattr(self.model_tester , "chunk_length" , lowerCAmelCase__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
snake_case_ : Optional[int] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = False
snake_case_ : Tuple = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
snake_case_ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
snake_case_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Optional[Any] = True
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
snake_case_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case_ : str = model_class(lowerCAmelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case_ : int = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
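# building the model functionally on symbolic Keras inputs exercises shape-agnostic code paths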
snake_case_ : int = model(lowerCAmelCase__ )
self.assertTrue(outputs_dict is not None )
def __UpperCAmelCase ( )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
snake_case_ : Dict = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : Any = image_processor(images=lowerCAmelCase__ , return_tensors="tf" )
# forward pass
snake_case_ : int = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
snake_case_ : int = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
snake_case_ : Optional[int] = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="tf" )
# forward pass
snake_case_ : List[Any] = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
snake_case_ : Optional[int] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
snake_case_ : Any = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 656
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
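# fairseq keeps separate q/k/v projection matrices, so they can be copied one-to-one into the HF self-attention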
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
# output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
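# Hedged usage sketch (paths and the script filename are placeholders, not from
# the source): the conversion is normally driven through the CLI defined below:
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head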
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 656
| 1
|
'''simple docstring'''
class EditDistance:
    """
    Edit-distance (Levenshtein) solver with both a memoised top-down and a
    tabulated bottom-up dynamic-programming formulation.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
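# A minimal agreement check (hedged addition, not part of the original script):
# both formulations must return the same distance for any input pair.
def _edit_distance_smoke_test() -> None:
    solver = EditDistance()
    # "kitten" -> "sitting" needs exactly 3 edits (k->s, e->i, insert g).
    assert solver.min_dist_top_down("kitten", "sitting") == 3
    assert solver.min_dist_bottom_up("kitten", "sitting") == 3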
if __name__ == "__main__":
    solver = EditDistance()
    print('''****************** Testing Edit Distance DP Algorithm ******************''')
    print()
    S1 = input('''Enter the first string: ''').strip()
    S2 = input('''Enter the second string: ''').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 656
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function inside a notebook, on TPU, GPU(s) or CPU."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
print(F'''Launching training on {num_processes} GPUs.''' )
try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
            function(*args)
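# Hedged usage sketch (the training function is hypothetical, not from the
# source): from a notebook cell one would typically write
#
#   def training_loop(mixed_precision="fp16"):
#       accelerator = Accelerator(mixed_precision=mixed_precision)
#       ...  # build model/optimizer and call accelerator.prepare(...)
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)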
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on `num_processes` CPU processes, for debugging."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 656
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments extended with generation-specific evaluation options."""

    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 656
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
        count = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack = len(stack) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
        begin = time()
        self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
        end = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
        begin = time()
        self.bfs(lowerCAmelCase__ )
        end = time()
return end - begin
class Graph:
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack = len(stack) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
        begin = time()
        self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
        end = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
        begin = time()
        self.bfs(lowerCAmelCase__ )
        end = time()
return end - begin
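# Hedged usage sketch, kept in comment form because the public method names
# (add_pair, dfs, bfs) are assumed from the internal references above:
#
#   dg = DirectedGraph()
#   dg.add_pair(0, 1)
#   dg.add_pair(1, 2)
#   dg.add_pair(2, 0)
#   dg.dfs()  # visits e.g. [0, 1, 2]
#   dg.bfs()  # visits e.g. [0, 1, 2]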
| 656
| 1
|
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy (fractional) knapsack: maximise the profit gained within a weight budget."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicate ratios are not re-picked
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # take the whole item: profit gained for weight[index] kg is
            # (weight[index] / weight[index]) * profit[index] = 1 * profit[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
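# Worked example (hedged; values chosen for illustration only): profits
# [10, 9, 8] with weights [5, 4, 3] and max_weight 7 give profit/weight ratios
# [2.0, 2.25, 2.67]. Greedy order: the 3 kg item (gain 8), then the 4 kg item
# (gain 9, budget exhausted), so the total gain is 17.
assert calc_profit([10, 9, 8], [5, 4, 3], 7) == 17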
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 656
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash(example):
    """Get the hash of the content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate the mean and max line length of the content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check whether the example's hash is still in the set of unique hashes, and consume it."""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """Check whether the first few lines flag the file as auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check whether the file is likely a configuration file or a unit test, by
    (1) scanning the first few lines for keywords and (2) counting how often
    'config'/'test' occur relative to the number of lines."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example):
    """Check whether the file contains none of the Python constructs of interest."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check whether the file uses the assignment operator `=` fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the global tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
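# Illustrative shape of one combined record (values are made up, not taken from
# any dataset):
#   {"hash": "…", "line_mean": 12.4, "line_max": 58, "alpha_frac": 0.72,
#    "ratio": 3.1, "autogenerated": False, "config_or_test": False,
#    "has_no_keywords": False, "has_few_assignments": True}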
def filter(example, uniques, args):
    """Filter the dataset with heuristics; some categories are dropped with probability args.filter_proba."""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Base class that every transformers-cli command implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
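# Hedged sketch (hypothetical command, not part of transformers): a minimal
# concrete subclass showing how a command registers itself. The object passed
# to register_subcommand is in practice the subparsers action, so add_parser
# is available on it despite the ArgumentParser type hint.
class _HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting (illustrative).")
        hello_parser.set_defaults(func=lambda args: _HelloCommand())

    def run(self):
        print("Hello from a transformers-cli command sketch.")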
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656
| 1
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Save model.generate results to out_file and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI arguments, run generation and (optionally) compute metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
snake_case_ : Dict = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
snake_case_ : Dict = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__magic_name__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 656
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
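# Example of what the regex extraction yields (taken from the pattern's own
# docstring above): for a config docstring containing
#   [bert-base-uncased](https://huggingface.co/bert-base-uncased)
# _re_checkpoint.findall returns
#   [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]
# and the name/link consistency check then accepts "bert-base-uncased".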
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
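# Hedged usage sketch (input is synthetic): a 640x480 RGB array is resized so
# its shortest edge is 224 and then center-cropped to the default 224x224
# CLIP resolution.
#
#   import numpy as np
#   processor = CLIPImageProcessor()
#   batch = processor.preprocess(np.zeros((480, 640, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)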
| 656
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration class for a CvT model; the defaults describe the CvT-13 layout."""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
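# Hedged example (mirrors the usual transformers config pattern): the defaults
# describe the three-stage CvT-13 layout.
#
#   config = CvtConfig()
#   config.embed_dim  # -> [64, 192, 384]
#   config.num_heads  # -> [1, 3, 6]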
| 656
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
'''For `sigmoied_threshold`, it is the threshold tau against which the (sigmoied) scores are compared. '''
'''Not needed for `l0`.'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder where the pruned model will be saved; defaults to `bertarized_<model_name_or_path>` next to the input model''',
)
__lowerCamelCase : Dict = parser.parse_args()
main(args)
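# Standalone sketch of the "magnitude" branch above (assumption: the emmental
# MagnitudeBinarizer keeps the top `threshold` fraction of weights by absolute
# value and zeroes the rest, matching the --threshold help text). Plain PyTorch:
import torch

def magnitude_prune(tensor: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    k = max(1, int(keep_fraction * tensor.numel()))
    # the k-th largest |w| is the (numel - k + 1)-th smallest
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return tensor * (tensor.abs() >= cutoff).to(tensor.dtype)

pruned = magnitude_prune(torch.randn(8, 8), keep_fraction=0.25)
assert (pruned != 0).sum() >= 1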
| 656
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here differs slightly from sacrebleu in terms of the required input format: the references and
hypotheses lists must have the same length, so you may need to transpose your references relative to
sacrebleu\'s expected input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
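# Minimal standalone cross-check against sacrebleu's own API (assumes
# sacrebleu>=1.4.12 is installed). Note the transposition mentioned above:
# sacrebleu takes one list per reference *stream*, not one per prediction.
from sacrebleu import TER as _TER

_preds = ["does this sentence match??"]
_ref_streams = [["does this sentence match"]]  # one stream with one segment
print(_TER().corpus_score(_preds, _ref_streams).score)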
| 656
| 1
|
'''simple docstring'''
from __future__ import annotations
def encode( plain: str )-> list[int]:
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded: list[int] )-> str:
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( )-> None:
    """simple docstring"""
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " ,encoded )
    print("Decoded:" ,decode(encoded ) )
if __name__ == "__main__":
main()
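# Round-trip sanity sketch: ord("a") == 97, so the -96 offset maps "a".."z"
# to 1..26 and decode inverts encode exactly on lowercase alphabetic input.
assert encode("abc") == [1, 2, 3]
assert decode(encode("hello")) == "hello"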
| 656
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
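# Context sketch (assumption): the 0.85 passed to make_duplicate_clusters above
# is a Jaccard-similarity threshold over token shingles, which is why the two
# "a"-only documents cluster together while the "b"-only one stays apart.
def _jaccard(a: set, b: set) -> float:
    return len(a & b) / len(a | b)

assert _jaccard({"a"}, {"a"}) == 1.0   # "a " * 20 vs "a " * 30
assert _jaccard({"a"}, {"b"}) == 0.0   # vs "b " * 7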
| 656
| 1
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A_ (a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :float , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = False
snake_case_ : Optional[int] = nn.Dropout(p=lowerCAmelCase__ )
snake_case_ : int = TaConfig(
vocab_size=lowerCAmelCase__ , d_model=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ , feed_forward_proj=lowerCAmelCase__ , is_decoder=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , )
snake_case_ : str = nn.ModuleList()
for lyr_num in range(lowerCAmelCase__ ):
snake_case_ : List[Any] = TaBlock(lowerCAmelCase__ )
self.encoders.append(lowerCAmelCase__ )
snake_case_ : Dict = TaLayerNorm(lowerCAmelCase__ )
snake_case_ : List[Any] = nn.Dropout(p=lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.token_embedder(lowerCAmelCase__ )
snake_case_ : Tuple = encoder_input_tokens.shape[1]
snake_case_ : Union[str, Any] = torch.arange(lowerCAmelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase__ )
snake_case_ : List[str] = self.dropout_pre(lowerCAmelCase__ )
        # invert the attention mask
snake_case_ : int = encoder_input_tokens.size()
snake_case_ : str = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ )
for lyr in self.encoders:
snake_case_ : Optional[int] = lyr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
snake_case_ : Any = self.layer_norm(lowerCAmelCase__ )
return self.dropout_post(lowerCAmelCase__ ), encoder_inputs_mask
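# Sketch of what get_extended_attention_mask produces for an encoder
# (assumption, mirroring transformers' ModuleUtilsMixin): a (batch, seq) 0/1
# padding mask becomes an additive (batch, 1, 1, seq) mask that is 0 where
# attention is allowed and a very large negative number where it is masked.
import torch

_pad_mask = torch.tensor([[1, 1, 0]], dtype=torch.float32)
_extended = (1.0 - _pad_mask[:, None, None, :]) * torch.finfo(torch.float32).min
assert _extended.shape == (1, 1, 1, 3) and _extended[0, 0, 0, -1] < -1e30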
| 656
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
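# The lazy-import pattern above in miniature (sketch only; the real _LazyModule
# lives in transformers.utils). The module object in sys.modules is replaced by
# a proxy that imports a submodule the first time one of its names is accessed.
import importlib
import types

class _LazyProxy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")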
| 656
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int]=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :int=18 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=[0.5, 0.5, 0.5] , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = size if size is not None else {"height": 18, "width": 18}
snake_case_ : List[str] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : List[str] = image_size
snake_case_ : int = min_resolution
snake_case_ : Optional[Any] = max_resolution
snake_case_ : List[str] = do_resize
snake_case_ : int = size
snake_case_ : List[Any] = do_normalize
snake_case_ : List[Any] = image_mean
snake_case_ : List[Any] = image_std
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = DPTImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = DPTImageProcessingTester(self )
@property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _A ( self :Any ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _A ( self :Optional[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _A ( self :List[Any] ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ : Union[str, Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 656
|
'''simple docstring'''
def hexagonal_numbers( length: int )-> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length ,int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
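# Quick check of the closed form h(n) = n * (2n - 1) used above; with range()
# starting at 0, the first five values are 0, 1, 6, 15, 28.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]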
| 656
| 1
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ : List[str] = roberta_layer.fca.weight
snake_case_ : List[Any] = roberta_layer.fca.bias
# output
snake_case_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case_ : Any = roberta_layer.fca.weight
snake_case_ : Any = roberta_layer.fca.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 656
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test" )
    else:
        parser = argparse.ArgumentParser("Accelerate test command" )
    parser.add_argument(
        "--config_file" ,default=None ,help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) ,)
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'''--config_file={args.config_file} {script_name}'''
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd ,env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )
def main( ):
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 656
| 1
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution( limit: int = 150_0000 )-> int:
    """simple docstring"""
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 ,euclid_m ,2 ):
            if gcd(euclid_m ,euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter ,limit + 1 ,primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
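# Why 2 * m * (m + n) is the primitive perimeter above: Euclid's formula with
# m > n, gcd(m, n) == 1 and m - n odd yields the primitive right triangle
#   a = m*m - n*n, b = 2*m*n, c = m*m + n*n,
# and a + b + c simplifies to 2*m*(m + n). Sanity check on the 3-4-5 triangle:
_m, _n = 2, 1
assert (_m * _m - _n * _n) + (2 * _m * _n) + (_m * _m + _n * _n) == 2 * _m * (_m + _n) == 12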
| 656
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned when `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
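# Direct scipy cross-check of the first docstring example above: the ranks of
# the predictions are [5, 4, 1, 3, 2] against [1, 2, 3, 4, 5], giving rho == -0.7.
_rho, _pval = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
assert abs(_rho - (-0.7)) < 1e-9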
| 656
| 1
|
'''simple docstring'''
def binary_and( a: int ,b: int )-> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) ,len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) ,b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
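# Cross-check against Python's built-in bitwise AND: 12 is 0b1100 and 10 is
# 0b1010, so the shared bits are 0b1000 == 8 (only the zero-padding differs).
assert int(binary_and(12, 10), 2) == (12 & 10) == 8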
| 656
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 656
| 1
|
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : int = tempfile.mkdtemp()
snake_case_ : Union[str, Any] = 8
# DPR tok
snake_case_ : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case_ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
snake_case_ : Tuple = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : Tuple = {"unk_token": "<unk>"}
snake_case_ : str = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
snake_case_ : Any = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : List[Any] = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Union[str, Any] ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _A ( self :List[str] ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _A ( self :Union[str, Any] ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def _A ( self :int ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : str = self.get_dummy_dataset()
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : List[Any] = dataset
snake_case_ : Optional[Any] = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _A ( self :Tuple , lowerCAmelCase__ :bool ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.get_dummy_dataset()
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
snake_case_ : Any = os.path.join(self.tmpdirname , "dataset" )
snake_case_ : Any = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case_ : int = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case_ : str = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase__ ) , )
return retriever
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case_ : str = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
snake_case_ : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
snake_case_ : int = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(lowerCAmelCase__ , open(lowerCAmelCase__ , "wb" ) )
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
snake_case_ : Union[str, Any] = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = 1
snake_case_ : List[str] = self.get_dummy_canonical_hf_index_retriever()
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_, snake_case_, snake_case_ : Union[str, Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : str = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
snake_case_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_, snake_case_, snake_case_ : Dict = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Any = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = 1
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
snake_case_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_, snake_case_, snake_case_ : int = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Dict = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : int = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = 1
snake_case_ : Dict = self.get_dummy_legacy_index_retriever()
snake_case_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_, snake_case_, snake_case_ : Optional[Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : List[Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self :List[str] ) -> Dict:
'''simple docstring'''
import torch
snake_case_ : Optional[Any] = 1
snake_case_ : Dict = self.get_dummy_canonical_hf_index_retriever()
snake_case_ : str = [[5, 7], [10, 11]]
snake_case_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : List[Any] = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_ : Union[str, Any] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
snake_case_ : Any = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ , return_tensors="pt" , )
snake_case_, snake_case_, snake_case_, snake_case_ : Union[str, Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : str = self.get_dpr_ctx_encoder_tokenizer()
snake_case_ : int = 1
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase__ )
snake_case_ : Optional[Any] = [[5, 7], [10, 11]]
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : str = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
self.assertEqual(
            len(lowerCAmelCase__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowerCAmelCase__ ) # check that the doc-token-related keys are present in the output dictionary
| 656
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert the set to a sorted list for deterministic output
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
        '''--strict''', action='''store_true''', help='''Whether to make the check strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
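    # Illustrative invocation from the repo root (hypothetical paths):
    #   python <this_script>.py --saved_model_path ./my_model/saved_model.pb --opset 12 --strict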
| 656
| 1
|
'''simple docstring'''
import argparse
import os
import re
__lowerCamelCase : Optional[int] = '''src/transformers'''
# Pattern that looks at the indentation in a line.
__lowerCamelCase : List[Any] = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCamelCase : List[Any] = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCamelCase : Optional[Any] = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCamelCase : List[str] = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCamelCase : Dict = re.compile(R'''\[([^\]]+)\]''')
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = _re_indent.search(__magic_name__ )
return "" if search is None else search.groups()[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__="" ,__magic_name__=None ,__magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = 0
snake_case_ : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__magic_name__ ):
index += 1
snake_case_ : str = ["\n".join(lines[:index] )]
else:
snake_case_ : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case_ : Optional[Any] = [lines[index]]
index += 1
while index < len(__magic_name__ ) and (end_prompt is None or not lines[index].startswith(__magic_name__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__magic_name__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__magic_name__ ) )
if index < len(__magic_name__ ) - 1:
snake_case_ : str = [lines[index + 1]]
index += 1
else:
snake_case_ : Tuple = []
else:
blocks.append("\n".join(__magic_name__ ) )
snake_case_ : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__magic_name__ ) > 0:
blocks.append("\n".join(__magic_name__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__magic_name__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
def _inner(__magic_name__ ):
return key(__magic_name__ ).lower().replace("_" ,"" )
return _inner
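# For illustration: the wrapper above makes key comparisons case- and
# underscore-insensitive, e.g.
#   sorted(["b_obj", "_a_obj", "CONST"], key=lambda s: s.lower().replace("_", ""))
# yields ['_a_obj', 'b_obj', 'CONST'].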
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=None )-> Dict:
"""simple docstring"""
def noop(__magic_name__ ):
        return __magic_name__
if key is None:
snake_case_ : Dict = noop
# Constants are all uppercase, they go first.
snake_case_ : Dict = [obj for obj in objects if key(__magic_name__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
snake_case_ : Dict = [obj for obj in objects if key(__magic_name__ )[0].isupper() and not key(__magic_name__ ).isupper()]
# Functions begin with a lowercase, they go last.
snake_case_ : str = [obj for obj in objects if not key(__magic_name__ )[0].isupper()]
snake_case_ : List[Any] = ignore_underscore(__magic_name__ )
return sorted(__magic_name__ ,key=__magic_name__ ) + sorted(__magic_name__ ,key=__magic_name__ ) + sorted(__magic_name__ ,key=__magic_name__ )
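# For illustration, sorting ["foo_fn", "CONSTANT", "BarClass"] with this scheme
# yields ["CONSTANT", "BarClass", "foo_fn"]: constants first, then classes, then functions.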
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
def _replace(__magic_name__ ):
snake_case_ : Any = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
snake_case_ : Tuple = [part.strip().replace("\"" ,"" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case_ : Tuple = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(__magic_name__ )] ) + "]"
snake_case_ : str = import_statement.split("\n" )
if len(__magic_name__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case_ : Tuple = 2 if lines[1].strip() == "[" else 1
snake_case_ : Dict = [(i, _re_strip_line.search(__magic_name__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case_ : Optional[Any] = sort_objects(__magic_name__ ,key=lambda __magic_name__ : x[1] )
snake_case_ : Optional[int] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__magic_name__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case_ : int = _re_bracket_content.sub(_replace ,lines[1] )
else:
snake_case_ : str = [part.strip().replace("\"" ,"" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case_ : List[Any] = keys[:-1]
snake_case_ : List[Any] = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(__magic_name__ )] )
return "\n".join(__magic_name__ )
else:
# Finally we have to deal with imports fitting on one line
snake_case_ : Optional[int] = _re_bracket_content.sub(_replace ,__magic_name__ )
return import_statement
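# A before/after sketch of this function on a one-line entry (hypothetical names):
#   _import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_fn"]
# becomes, with constants first, then classes, then functions,
#   _import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_fn"]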
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=True )-> Union[str, Any]:
"""simple docstring"""
with open(__magic_name__ ,encoding="utf-8" ) as f:
snake_case_ : List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
snake_case_ : List[Any] = split_code_in_indented_blocks(
__magic_name__ ,start_prompt="_import_structure = {" ,end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(__magic_name__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
snake_case_ : List[str] = main_blocks[block_idx]
snake_case_ : str = block.split("\n" )
# Get to the start of the imports.
snake_case_ : Optional[Any] = 0
while line_idx < len(__magic_name__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
snake_case_ : str = len(__magic_name__ )
else:
line_idx += 1
if line_idx >= len(__magic_name__ ):
continue
# Ignore beginning and last line: they don't contain anything.
snake_case_ : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
snake_case_ : Optional[Any] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
snake_case_ : Union[str, Any] = split_code_in_indented_blocks(__magic_name__ ,indent_level=__magic_name__ )
# We have two categories of import key: list or _import_structure[key].append/extend
snake_case_ : Dict = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
snake_case_ : List[str] = [(pattern.search(__magic_name__ ).groups()[0] if pattern.search(__magic_name__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
snake_case_ : Dict = [(i, key) for i, key in enumerate(__magic_name__ ) if key is not None]
snake_case_ : str = [x[0] for x in sorted(__magic_name__ ,key=lambda __magic_name__ : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
snake_case_ : Optional[Any] = 0
snake_case_ : Any = []
for i in range(len(__magic_name__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
snake_case_ : List[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__magic_name__ )
count += 1
# And we put our main block back together with its first and last line.
snake_case_ : Optional[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__magic_name__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(__magic_name__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(__magic_name__ ) )
def __UpperCAmelCase ( __magic_name__=True )-> Optional[int]:
"""simple docstring"""
snake_case_ : Dict = []
for root, _, files in os.walk(__magic_name__ ):
if "__init__.py" in files:
snake_case_ : Union[str, Any] = sort_imports(os.path.join(__magic_name__ ,"__init__.py" ) ,check_only=__magic_name__ )
if result:
snake_case_ : Optional[int] = [os.path.join(__magic_name__ ,"__init__.py" )]
if len(__magic_name__ ) > 0:
raise ValueError(F'''Would overwrite {len(__magic_name__ )} files, run `make style`.''' )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__lowerCamelCase : Any = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 656
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
        # some others are deprecated and likewise must not be passed if they are left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise
| 656
| 1
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
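# Illustrative usage of the debug launcher above, assuming the de-obfuscated
# name debug_launcher and a hypothetical training function:
#   debug_launcher(training_function, args=(config,), num_processes=2)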
| 656
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
while a != 0:
snake_case_, snake_case_ : Any = b % a, a
return b
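# e.g. for a = 12 and b = 18 the loop steps through (a, b) = (12, 18) -> (6, 12)
# -> (0, 6) and returns the gcd, 6.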
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
if gcd(__magic_name__ ,__magic_name__ ) != 1:
snake_case_ : List[Any] = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(__magic_name__ )
snake_case_, snake_case_, snake_case_ : int = 1, 0, a
snake_case_, snake_case_, snake_case_ : Tuple = 0, 1, m
while va != 0:
snake_case_ : int = ua // va
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
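# A readable sketch (illustrative names, assuming gcd(a, m) == 1) of the
# extended-Euclidean update performed by the obfuscated tuple assignment above;
# the invariant u1 * a + u2 * m == u3 holds on every iteration, so u1 ends up
# congruent to the modular inverse of a.
def _example_mod_inverse(a: int, m: int) -> int:
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    return u1 % m  # e.g. _example_mod_inverse(7, 26) == 15, since 7 * 15 % 26 == 1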
| 656
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
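# e.g. parallel vectors score 1.0: for [1, 2] and [2, 4] the dot product is 10
# and the norm product is sqrt(5) * sqrt(20) = 10, so the similarity is 10 / 10.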
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_, snake_case_ : str = len(__magic_name__ ), len(grid[0] )
if (
min(__magic_name__ ,__magic_name__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
snake_case_ : List[str] = 0
count += depth_first_search(__magic_name__ ,row + 1 ,__magic_name__ ,__magic_name__ )
count += depth_first_search(__magic_name__ ,row - 1 ,__magic_name__ ,__magic_name__ )
count += depth_first_search(__magic_name__ ,__magic_name__ ,col + 1 ,__magic_name__ )
count += depth_first_search(__magic_name__ ,__magic_name__ ,col - 1 ,__magic_name__ )
visit.remove((row, col) )
return count
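# e.g. on a fully open 2x2 grid (grid = [[0, 0], [0, 0]], starting at row = 0,
# col = 0 with an empty visit set) there are exactly two simple paths to the
# bottom-right cell (right-then-down and down-then-right), so the search above
# returns 2.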
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
if save_path is not None:
save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
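# Illustrative fire CLI call (hypothetical file names):
#   python <this_script>.py predictions.txt references.txt --save_path rouge_metrics.json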
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> float:
"""simple docstring"""
snake_case_ : Dict = 0
while len(__magic_name__ ) > 1:
snake_case_ : List[Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
snake_case_ : Union[str, Any] = files.index(min(__magic_name__ ) )
temp += files[min_index]
files.pop(__magic_name__ )
files.append(__magic_name__ )
optimal_merge_cost += temp
return optimal_merge_cost
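# e.g. for file sizes [2, 3, 4]: merge 2 + 3 first (cost 5), then 5 + 4 (cost 9),
# so the optimal total merge cost is 5 + 9 = 14.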
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
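# e.g. the rename above maps "backbone.0.body.layer1.0.conv1.weight"
# to "backbone.conv_encoder.model.layer1.0.conv1.weight"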
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
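        # the fused in_proj_weight has shape (3 * 256, 256): rows 0-255 hold the
        # query projection, rows 256-511 the key projection, and the final 256
        # rows the value projection, which is exactly how the slices above split it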
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656
| 1
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__lowerCamelCase : Union[str, Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
super().__init__()
        snake_case_ : List[Any] = torchvision.models.resnet152(pretrained=lowerCAmelCase__ )
snake_case_ : Tuple = list(model.children() )[:-2]
snake_case_ : Union[str, Any] = nn.Sequential(*lowerCAmelCase__ )
        snake_case_ : str = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _A ( self :List[Any] , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.pool(self.model(lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = torch.flatten(lowerCAmelCase__ , start_dim=2 )
snake_case_ : Union[str, Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class A_ (a_ ):
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
        snake_case_ : str = [json.loads(l ) for l in open(lowerCAmelCase__ )]
snake_case_ : Tuple = os.path.dirname(lowerCAmelCase__ )
snake_case_ : Any = tokenizer
snake_case_ : List[Any] = labels
snake_case_ : Union[str, Any] = len(lowerCAmelCase__ )
snake_case_ : str = max_seq_length
snake_case_ : Tuple = transforms
def __len__( self :Dict ) -> Any:
'''simple docstring'''
return len(self.data )
def __getitem__( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=lowerCAmelCase__ ) )
snake_case_, snake_case_, snake_case_ : str = sentence[0], sentence[1:-1], sentence[-1]
snake_case_ : Tuple = sentence[: self.max_seq_length]
snake_case_ : List[Any] = torch.zeros(self.n_classes )
snake_case_ : List[str] = 1
snake_case_ : str = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
snake_case_ : int = self.transforms(lowerCAmelCase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Any = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[int] = [len(row["sentence"] ) for row in batch]
snake_case_, snake_case_ : Optional[Any] = len(__magic_name__ ), max(__magic_name__ )
snake_case_ : Any = torch.zeros(__magic_name__ ,__magic_name__ ,dtype=torch.long )
snake_case_ : List[Any] = torch.zeros(__magic_name__ ,__magic_name__ ,dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__magic_name__ ,__magic_name__ ) ):
snake_case_ : Optional[int] = input_row["sentence"]
snake_case_ : List[str] = 1
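        # intent of the two assignments above: the sentence fills the first
        # `length` positions of row i_batch in the zero-initialized text tensor,
        # and the mask marks those positions with ones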
snake_case_ : Dict = torch.stack([row["image"] for row in batch] )
snake_case_ : Dict = torch.stack([row["label"] for row in batch] )
snake_case_ : int = torch.stack([row["image_start_token"] for row in batch] )
snake_case_ : List[Any] = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __UpperCAmelCase ( )-> str:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] ,std=[0.12_221_994, 0.12_145_835, 0.14_380_469] ,),
] )
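# Hedged usage sketch: the obfuscated pieces above correspond, in the original
# MM-IMDB example, to a JSONL dataset class, a batch collate function, a label
# list helper, and the torchvision transform pipeline. The wiring below is
# illustrative only -- the names ``JsonlDataset``, ``collate_fn``,
# ``get_mmimdb_labels`` and ``get_image_transforms`` are presumed originals,
# and the path/tokenizer are hypothetical:
# from torch.utils.data import DataLoader
#
# labels = get_mmimdb_labels()
# dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)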
| 656
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImg2ImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImg2ImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImg2ImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImg2ImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
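# Hedged usage sketch for the pipeline exercised above (public diffusers API;
# the model id and prompt come from the slow tests, the strength and guidance
# values are illustrative, and ``init_image`` is a hypothetical PIL image):
# from diffusers import AltDiffusionImg2ImgPipeline
#
# pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
# out = pipe(prompt="A fantasy landscape, trending on artstation",
#            image=init_image, strength=0.75, guidance_scale=7.5, output_type="np")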
| 656
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : str = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
a__ = ['''pixel_values''']
def __init__( self :Tuple , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Dict[str, int]] = None , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[int, float] = 1 / 255 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ :Dict , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : str = size if size is not None else {"shortest_edge": 256}
snake_case_ : List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
snake_case_ : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
snake_case_ : Optional[int] = get_size_dict(lowerCAmelCase__ )
snake_case_ : str = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[int] = resample
snake_case_ : Union[str, Any] = do_center_crop
snake_case_ : List[str] = crop_size
snake_case_ : Optional[int] = do_rescale
snake_case_ : int = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self :str , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :int , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
snake_case_ : Optional[Any] = get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :int , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Dict , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = get_size_dict(lowerCAmelCase__ )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :float , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Union[str, Any] ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :str , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Tuple , lowerCAmelCase__ :ImageInput , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :PILImageResampling = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[float] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ :str , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = size if size is not None else self.size
snake_case_ : Dict = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
snake_case_ : Dict = resample if resample is not None else self.resample
snake_case_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : Dict = get_size_dict(lowerCAmelCase__ )
snake_case_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : Optional[Any] = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
snake_case_ : Any = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
snake_case_ : Any = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
snake_case_ : Tuple = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
snake_case_ : Any = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
snake_case_ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
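# Hedged sketch of the data flow implemented by ``preprocess`` above: resize to
# the shortest edge, center-crop, rescale to [0, 1], normalize, then move the
# channel axis first. The plain-numpy restatement below is illustrative only
# (the shapes and the scalar mean/std of 0.5 are chosen for the example):
_img = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
_rescaled = _img.astype(np.float32) / 255.0      # do_rescale with factor 1/255
_normalized = (_rescaled - 0.5) / 0.5            # do_normalize with scalar mean/std
_chw = _normalized.transpose(2, 0, 1)            # ChannelDimension.FIRST
assert _chw.shape == (3, 256, 256)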
| 656
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.label2id
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_label2id
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
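# Hedged sketch of the entailment-id rule the tests above exercise: the
# pipeline scans config.label2id for a label that starts with "entail"
# (case-insensitively) and falls back to -1 otherwise. The helper below is an
# illustrative reimplementation, not the pipeline's own code path:
def _resolve_entailment_id(label2id: dict) -> int:
    for label, idx in label2id.items():
        if label.lower().startswith("entail"):
            return idx
    return -1

assert _resolve_entailment_id({"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}) == -1
assert _resolve_entailment_id({"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}) == 2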
| 656
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
@property
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[int] = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCAmelCase__ )
def _A ( self :Dict ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.dummy_uncond_unet
snake_case_ : Optional[int] = DDIMScheduler()
snake_case_ : Dict = self.dummy_vq_model
snake_case_ : List[Any] = LDMPipeline(unet=lowerCAmelCase__ , vqvae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ldm.to(lowerCAmelCase__ )
ldm.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = ldm(generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="numpy" ).images
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : List[str] = ldm(generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="numpy" , return_dict=lowerCAmelCase__ )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : int = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
snake_case_ : int = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(lowerCAmelCase__ )
ldm.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = torch.manual_seed(0 )
snake_case_ : Any = ldm(generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="numpy" ).images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case_ : Any = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
snake_case_ : int = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
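# Hedged usage sketch for the unconditional latent diffusion pipeline tested
# above (public diffusers API; the model id comes from the slow test, the step
# count is illustrative):
# from diffusers import LDMPipeline
#
# ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# images = ldm(num_inference_steps=50, output_type="numpy").images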
| 656
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
snake_case_ : List[str] = roberta_layer.fc1.weight
snake_case_ : List[Any] = roberta_layer.fc1.bias
# output
snake_case_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
snake_case_ : Any = roberta_layer.fc2.weight
snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
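# Hedged example invocation of the conversion script above (the script file
# name and both paths are hypothetical, shown only to illustrate the CLI):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --classification_head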
| 656
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
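# Hedged usage sketch: this file is the configuration for CvT, exposed
# upstream as ``CvtConfig``. Typical construction (illustrative values,
# independent of the obfuscated copy above):
# from transformers import CvtConfig
#
# config = CvtConfig(num_channels=3, embed_dim=[64, 192, 384], num_heads=[1, 3, 6])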
| 656
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
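# Hedged usage sketch for the notebook launcher defined above (exposed
# upstream as ``accelerate.notebook_launcher``; the training function is a
# hypothetical placeholder):
# from accelerate import notebook_launcher
#
# def training_function():
#     ...  # build the model, dataloaders and Accelerator, then train
#
# notebook_launcher(training_function, args=(), num_processes=2, mixed_precision="fp16")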
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
for ch in input_str:
snake_case_ : int = ord(__magic_name__ )
snake_case_ : Tuple = pow(2 ,__magic_name__ )
# If the bit for the current character's code point is already set
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
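# A self-contained, runnable restatement of the bitmask technique above with
# descriptive names (the names below are illustrative, not from the original
# file): each character's code point selects one bit, so a bit that is
# already set signals a repeated character.
def _has_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_bit = 1 << ord(ch)
        if bitmap & ch_bit:
            return False
        bitmap |= ch_bit
    return True

assert _has_unique_chars("abcde") is True
assert _has_unique_chars("abcdea") is False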
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
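# A self-contained restatement of the BFS traversal implemented above, using a
# plain adjacency dict (illustrative and independent of the class, whose
# method names were obfuscated):
def _bfs_order(adj: dict, start) -> list:
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for nxt in adj.get(node, []):
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    return visited

assert _bfs_order({0: [1, 2], 1: [2], 2: []}, 0) == [0, 1, 2]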
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
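        # Iterative DFS: a neighbor that is already visited, is not the immediate
        # parent, and is not on the backtracking path marks a cycle; stack entries
        # from that neighbor up to the top are collected as cycle ("anticipating") nodes.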
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
| 656
| 1
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__lowerCamelCase : int = logging.get_logger(__name__)
# General docstring
__lowerCamelCase : str = '''PoolFormerConfig'''
# Base docstring
__lowerCamelCase : Tuple = '''sail/poolformer_s12'''
__lowerCamelCase : int = [1, 512, 7, 7]
# Image classification docstring
__lowerCamelCase : Any = '''sail/poolformer_s12'''
__lowerCamelCase : Any = '''tabby, tabby cat'''
__lowerCamelCase : Any = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 0.0 ,__magic_name__ = False )-> str:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
snake_case_ : List[Any] = 1 - drop_prob
snake_case_ : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case_ : Any = keep_prob + torch.rand(__magic_name__ ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
snake_case_ : Dict = input.div(__magic_name__ ) * random_tensor
return output
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :Optional[float] = None ) -> None:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = drop_prob
def _A ( self :List[str] , lowerCAmelCase__ :torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return drop_path(lowerCAmelCase__ , self.drop_prob , self.training )
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple=None ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = patch_size if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case_ : Union[str, Any] = stride if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (stride, stride)
snake_case_ : Dict = padding if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (padding, padding)
snake_case_ : Union[str, Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ )
snake_case_ : Optional[Any] = norm_layer(lowerCAmelCase__ ) if norm_layer else nn.Identity()
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.projection(lowerCAmelCase__ )
snake_case_ : Dict = self.norm(lowerCAmelCase__ )
return embeddings
class A_ (nn.GroupNorm ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(1 , lowerCAmelCase__ , **lowerCAmelCase__ )
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.AvgPoolad(lowerCAmelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
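        # PoolFormer token mixer: average pooling minus the identity; the input is
        # added back through the residual connection in the enclosing layer.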
return self.pool(lowerCAmelCase__ ) - hidden_states
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case_ : int = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
snake_case_ : Any = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
snake_case_ : Optional[Any] = PoolFormerDropPath(lowerCAmelCase__ )
if isinstance(config.hidden_act , lowerCAmelCase__ ):
snake_case_ : List[str] = ACTaFN[config.hidden_act]
else:
snake_case_ : int = config.hidden_act
def _A ( self :Optional[Any] , lowerCAmelCase__ :int ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.conva(lowerCAmelCase__ )
snake_case_ : Dict = self.act_fn(lowerCAmelCase__ )
snake_case_ : Optional[int] = self.drop(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = self.conva(lowerCAmelCase__ )
snake_case_ : int = self.drop(lowerCAmelCase__ )
return hidden_states
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = PoolFormerPooling(lowerCAmelCase__ )
snake_case_ : Dict = PoolFormerOutput(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = PoolFormerGroupNorm(lowerCAmelCase__ )
snake_case_ : Any = PoolFormerGroupNorm(lowerCAmelCase__ )
# Useful for training neural nets
snake_case_ : Optional[int] = PoolFormerDropPath(lowerCAmelCase__ ) if drop_path > 0.0 else nn.Identity()
snake_case_ : Any = config.use_layer_scale
if config.use_layer_scale:
snake_case_ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCAmelCase__) ) , requires_grad=lowerCAmelCase__ )
snake_case_ : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCAmelCase__) ) , requires_grad=lowerCAmelCase__ )
def _A ( self :Tuple , lowerCAmelCase__ :Any ) -> List[Any]:
'''simple docstring'''
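        # LayerScale: learnable per-channel scaling applied to each sub-block's
        # output before the residual addition, which stabilizes deeper variants.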
if self.use_layer_scale:
snake_case_ : str = self.pooling(self.before_norm(lowerCAmelCase__ ) )
snake_case_ : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case_ : Tuple = hidden_states + self.drop_path(lowerCAmelCase__ )
snake_case_ : str = ()
snake_case_ : Optional[int] = self.output(self.after_norm(lowerCAmelCase__ ) )
snake_case_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case_ : Dict = hidden_states + self.drop_path(lowerCAmelCase__ )
snake_case_ : Dict = (output,) + outputs
return outputs
else:
snake_case_ : List[str] = self.drop_path(self.pooling(self.before_norm(lowerCAmelCase__ ) ) )
# First residual connection
snake_case_ : Optional[int] = pooling_output + hidden_states
snake_case_ : Union[str, Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case_ : Tuple = self.drop_path(self.output(self.after_norm(lowerCAmelCase__ ) ) )
snake_case_ : Optional[int] = hidden_states + layer_output
snake_case_ : List[Any] = (output,) + outputs
return outputs
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : int = config
# stochastic depth decay rule
snake_case_ : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case_ : Optional[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case_ : List[Any] = nn.ModuleList(lowerCAmelCase__ )
# Transformer blocks
snake_case_ : Dict = []
snake_case_ : List[Any] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case_ : Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
lowerCAmelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(lowerCAmelCase__ ) )
snake_case_ : int = nn.ModuleList(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Tuple=True ) -> str:
'''simple docstring'''
snake_case_ : str = () if output_hidden_states else None
snake_case_ : List[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case_, snake_case_ : str = layers
# Get patch embeddings from hidden_states
snake_case_ : Dict = embedding_layer(lowerCAmelCase__ )
# Send the embeddings through the blocks
for _, blk in enumerate(lowerCAmelCase__ ):
snake_case_ : List[str] = blk(lowerCAmelCase__ )
snake_case_ : Any = layer_outputs[0]
if output_hidden_states:
snake_case_ : Any = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
class A_ (a_ ):
"""simple docstring"""
a__ = PoolFormerConfig
a__ = '''poolformer'''
a__ = '''pixel_values'''
a__ = True
def _A ( self :List[Any] , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :str=False ) -> str:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = value
__lowerCamelCase : str = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCamelCase : Tuple = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , a_ , )
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
snake_case_ : Optional[Any] = config
snake_case_ : int = PoolFormerEncoder(lowerCAmelCase__ )
# Initialize weights and apply final processing
self.post_init()
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _A ( self :str , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
snake_case_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
snake_case_ : int = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )
snake_case_ : List[str] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def _A ( self :str , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.dense(lowerCAmelCase__ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , a_ , )
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : int = PoolFormerModel(lowerCAmelCase__ )
# Final norm
snake_case_ : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case_ : Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A ( self :Dict , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :Optional[torch.LongTensor] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
snake_case_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Tuple = self.poolformer(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )
snake_case_ : Dict = outputs[0]
snake_case_ : Optional[int] = self.classifier(self.norm(lowerCAmelCase__ ).mean([-2, -1] ) )
snake_case_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case_ : int = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case_ : Tuple = "single_label_classification"
else:
snake_case_ : Union[str, Any] = "multi_label_classification"
if self.config.problem_type == "regression":
snake_case_ : Optional[Any] = MSELoss()
if self.num_labels == 1:
snake_case_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case_ : Union[str, Any] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
snake_case_ : List[Any] = CrossEntropyLoss()
snake_case_ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case_ : Any = BCEWithLogitsLoss()
snake_case_ : Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
snake_case_ : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
| 656
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''rwkv'''
a__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Any=50_277 , lowerCAmelCase__ :Optional[int]=1_024 , lowerCAmelCase__ :Tuple=4_096 , lowerCAmelCase__ :Optional[int]=32 , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[str]=1E-5 , lowerCAmelCase__ :Any=0 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Optional[Any]=6 , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :str=True , **lowerCAmelCase__ :Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = vocab_size
snake_case_ : str = context_length
snake_case_ : int = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case_ : Any = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case_ : Tuple = layer_norm_epsilon
snake_case_ : str = rescale_every
snake_case_ : Dict = use_cache
snake_case_ : Union[str, Any] = bos_token_id
snake_case_ : int = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
assert column_title.isupper()
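    # Interpret the title as a bijective base-26 numeral ('A' -> 1 ... 'Z' -> 26),
    # scanning from the least significant (rightmost) character.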
snake_case_ : Optional[int] = 0
snake_case_ : List[Any] = len(__magic_name__ ) - 1
snake_case_ : List[str] = 0
while index >= 0:
snake_case_ : int = (ord(column_title[index] ) - 64) * pow(26 ,__magic_name__ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 656
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''xlm-roberta-xl'''
def __init__( self :int , lowerCAmelCase__ :str=250_880 , lowerCAmelCase__ :Any=2_560 , lowerCAmelCase__ :Any=36 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Optional[int]=10_240 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=514 , lowerCAmelCase__ :Tuple=1 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Any=1E-0_5 , lowerCAmelCase__ :Union[str, Any]=1 , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :Union[str, Any]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Tuple = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : Tuple = hidden_act
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Any = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = position_embedding_type
snake_case_ : Optional[Any] = use_cache
snake_case_ : List[str] = classifier_dropout
class A_ (a_ ):
"""simple docstring"""
@property
def _A ( self :int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 656
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
| 656
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
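        # sacrebleu expects references grouped by reference index (one stream per
        # index, each covering all predictions), so transpose the per-prediction lists.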
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656
| 1
|
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)-> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
snake_case_ : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) )
] # the reference grid
snake_case_ : Optional[Any] = 1
snake_case_ : Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) )
] # the action grid
snake_case_ : List[Any] = init[0]
snake_case_ : List[Any] = init[1]
snake_case_ : Any = 0
snake_case_ : Tuple = g + heuristic[x][y] # cost from starting cell to destination cell
snake_case_ : Tuple = [[f, g, x, y]]
snake_case_ : List[Any] = False # flag that is set when search is complete
    snake_case_ : Tuple = False # flag set if we cannot expand any further
while not found and not resign:
if len(__magic_name__ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
        else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case_ : Tuple = cell.pop()
snake_case_ : List[str] = next_cell[2]
snake_case_ : int = next_cell[3]
snake_case_ : Optional[int] = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case_ : Tuple = True
else:
for i in range(len(__magic_name__ ) ): # to try out different valid actions
snake_case_ : Optional[Any] = x + DIRECTIONS[i][0]
snake_case_ : List[str] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__magic_name__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case_ : Dict = g + cost
snake_case_ : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case_ : Union[str, Any] = 1
snake_case_ : Optional[Any] = i
snake_case_ : Optional[Any] = []
snake_case_ : Any = goal[0]
snake_case_ : Tuple = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case_ : Any = x - DIRECTIONS[action[x][y]][0]
snake_case_ : Tuple = y - DIRECTIONS[action[x][y]][1]
snake_case_ : Tuple = xa
snake_case_ : Tuple = ya
invpath.append([x, y] )
snake_case_ : Any = []
for i in range(len(__magic_name__ ) ):
path.append(invpath[len(__magic_name__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__lowerCamelCase : Any = [0, 0]
# all coordinates are given in format [y,x]
__lowerCamelCase : Optional[Any] = [len(grid) - 1, len(grid[0]) - 1]
__lowerCamelCase : Any = 1
# the cost map which pushes the path closer to the goal
__lowerCamelCase : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__lowerCamelCase : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__lowerCamelCase : Dict = 99
__lowerCamelCase , __lowerCamelCase : Dict = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 656
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Dict = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
__lowerCamelCase : Tuple = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(__magic_name__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 656
| 1
|
'''simple docstring'''
__lowerCamelCase : Dict = range(2, 20 + 1)
__lowerCamelCase : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase : dict[int, dict[int, list[list[int]]]] = {}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Dict = sum(a_i[j] for j in range(__magic_name__ ,len(__magic_name__ ) ) )
snake_case_ : Any = sum(a_i[j] * base[j] for j in range(min(len(__magic_name__ ) ,__magic_name__ ) ) )
snake_case_, snake_case_ : Any = 0, 0
snake_case_ : Union[str, Any] = n - i
snake_case_ : str = memo.get(__magic_name__ )
if sub_memo is not None:
snake_case_ : Tuple = sub_memo.get(__magic_name__ )
if jumps is not None and len(__magic_name__ ) > 0:
# find and make the largest jump without going over
snake_case_ : Any = -1
for _k in range(len(__magic_name__ ) - 1 ,-1 ,-1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ : Optional[int] = _k
break
if max_jump >= 0:
snake_case_, snake_case_, snake_case_ : int = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ : Any = diff + c
for j in range(min(__magic_name__ ,len(__magic_name__ ) ) ):
snake_case_, snake_case_ : Dict = divmod(__magic_name__ ,10 )
if new_c > 0:
add(__magic_name__ ,__magic_name__ ,__magic_name__ )
else:
snake_case_ : Tuple = []
else:
snake_case_ : Optional[Any] = {c: []}
snake_case_ : Dict = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_, snake_case_ : Optional[int] = next_term(__magic_name__ ,k - 1 ,i + dn ,__magic_name__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_, snake_case_ : str = compute(__magic_name__ ,__magic_name__ ,i + dn ,__magic_name__ )
diff += _diff
dn += terms_jumped
snake_case_ : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ : Any = 0
while j < len(__magic_name__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__magic_name__ ,(diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__magic_name__ ):
a_i.extend([0 for _ in range(k - len(__magic_name__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ : int = i
snake_case_, snake_case_, snake_case_ : Optional[Any] = 0, 0, 0
for j in range(len(__magic_name__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ : str = ds_c + ds_b
diff += addend
snake_case_ : Union[str, Any] = 0
for j in range(__magic_name__ ):
snake_case_ : Optional[Any] = a_i[j] + addend
snake_case_, snake_case_ : Optional[Any] = divmod(__magic_name__ ,10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__magic_name__ ,__magic_name__ ,__magic_name__ )
return diff, i - start_i
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
for j in range(__magic_name__ ,len(__magic_name__ ) ):
snake_case_ : Tuple = digits[j] + addend
if s >= 10:
snake_case_, snake_case_ : Optional[int] = divmod(__magic_name__ ,10 )
snake_case_ : str = addend // 10 + quotient
else:
snake_case_ : Optional[Any] = s
snake_case_ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
snake_case_, snake_case_ : Optional[int] = divmod(__magic_name__ ,10 )
digits.append(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 10**15 )-> int:
"""simple docstring"""
snake_case_ : List[str] = [1]
snake_case_ : Optional[Any] = 1
snake_case_ : Any = 0
while True:
snake_case_, snake_case_ : Optional[Any] = next_term(__magic_name__ ,20 ,i + dn ,__magic_name__ )
dn += terms_jumped
if dn == n - i:
break
snake_case_ : Optional[Any] = 0
for j in range(len(__magic_name__ ) ):
a_n += digits[j] * 10**j
return a_n
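# A brute-force cross-check sketch (hypothetical helper, not part of the original
# solution): the sequence is a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)), so for
# small n this naive loop should agree with the memoized jump-table solution above.
def naive_a_n(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))  # add the digit sum of the current term
    return term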
if __name__ == "__main__":
print(f'''{solution() = }''')
| 656
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
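# A minimal invocation sketch (the config path below is a placeholder):
#
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which is equivalent to calling `test_command` above and runs the bundled
# test_script.py through `accelerate-launch`.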
| 656
| 1
|
'''simple docstring'''
__lowerCamelCase : int = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__lowerCamelCase : int = frozenset(['''prompt''', '''negative_prompt'''])
__lowerCamelCase : int = frozenset([])
__lowerCamelCase : str = frozenset(['''image'''])
__lowerCamelCase : List[Any] = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__lowerCamelCase : Optional[int] = frozenset(['''image'''])
__lowerCamelCase : List[Any] = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__lowerCamelCase : Tuple = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
__lowerCamelCase : str = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__lowerCamelCase : str = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
__lowerCamelCase : str = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__lowerCamelCase : Any = frozenset(['''image''', '''mask_image'''])
__lowerCamelCase : Any = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__lowerCamelCase : List[str] = frozenset(['''example_image''', '''image''', '''mask_image'''])
__lowerCamelCase : str = frozenset(['''class_labels'''])
__lowerCamelCase : Optional[int] = frozenset(['''class_labels'''])
__lowerCamelCase : Optional[int] = frozenset(['''batch_size'''])
__lowerCamelCase : int = frozenset([])
__lowerCamelCase : str = frozenset(['''batch_size'''])
__lowerCamelCase : Tuple = frozenset([])
__lowerCamelCase : Optional[Any] = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__lowerCamelCase : Optional[Any] = frozenset(['''prompt''', '''negative_prompt'''])
__lowerCamelCase : Any = frozenset(['''input_tokens'''])
__lowerCamelCase : int = frozenset(['''input_tokens'''])
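# A hedged reading of this module (inferred, not stated in the source): it appears to
# mirror diffusers' `pipeline_params.py`, where each pipeline family gets a pair of
# frozensets: first the accepted call arguments, then the arguments that vary per
# batch item; unconditional pipelines get empty sets.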
| 656
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : List[str] = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 656
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = KandinskyVaaInpaintPipeline
a__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
a__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
a__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
a__ = False
@property
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return 32
@property
def _A ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return 100
@property
def _A ( self :str ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
snake_case_ : Any = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.dummy_unet
snake_case_ : List[Any] = self.dummy_movq
snake_case_ : Dict = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
snake_case_ : Any = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _A ( self :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=0 ) -> List[str]:
'''simple docstring'''
snake_case_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
snake_case_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((256, 256) )
# create mask
snake_case_ : Dict = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : Union[str, Any] = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
snake_case_ : Dict = torch.manual_seed(lowerCAmelCase__ )
else:
snake_case_ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = "cpu"
snake_case_ : Optional[int] = self.get_dummy_components()
snake_case_ : Optional[int] = self.pipeline_class(**lowerCAmelCase__ )
snake_case_ : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
snake_case_ : Any = output.images
snake_case_ : Dict = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Dict = image[0, -3:, -3:, -1]
snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
snake_case_ : List[str] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _A ( self :str ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :int ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
snake_case_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
snake_case_ : int = np.ones((768, 768) , dtype=np.floataa )
snake_case_ : Optional[Any] = 0
snake_case_ : Dict = "a hat"
snake_case_ : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
snake_case_ : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
snake_case_ : Union[str, Any] = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_, snake_case_ : Union[str, Any] = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
snake_case_ : List[Any] = pipeline(
image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 656
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you run this script from the root of the repo
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert the op set to a sorted list
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
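# A hedged invocation sketch (the script name and path are placeholders):
#
#     python check_tf_ops.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict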
| 656
| 1
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger('''transformers.models.speecht5''')
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
hf_model.apply_weight_norm()
snake_case_ : Union[str, Any] = checkpoint["input_conv.weight_g"]
snake_case_ : List[Any] = checkpoint["input_conv.weight_v"]
snake_case_ : Optional[int] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
snake_case_ : Optional[Any] = checkpoint[F'''upsamples.{i}.1.weight_g''']
snake_case_ : Optional[Any] = checkpoint[F'''upsamples.{i}.1.weight_v''']
snake_case_ : Tuple = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case_ : Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
snake_case_ : List[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
snake_case_ : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
snake_case_ : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
snake_case_ : List[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
snake_case_ : int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
snake_case_ : int = checkpoint["output_conv.1.weight_g"]
snake_case_ : Dict = checkpoint["output_conv.1.weight_v"]
snake_case_ : Optional[Any] = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=None ,__magic_name__=None ,)-> Dict:
"""simple docstring"""
if config_path is not None:
snake_case_ : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__magic_name__ )
else:
snake_case_ : List[Any] = SpeechTaHifiGanConfig()
snake_case_ : List[str] = SpeechTaHifiGan(__magic_name__ )
snake_case_ : List[Any] = torch.load(__magic_name__ )
load_weights(orig_checkpoint["model"]["generator"] ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = np.load(__magic_name__ )
snake_case_ : int = stats[0].reshape(-1 )
snake_case_ : Optional[Any] = stats[1].reshape(-1 )
snake_case_ : Any = torch.from_numpy(__magic_name__ ).float()
snake_case_ : int = torch.from_numpy(__magic_name__ ).float()
model.save_pretrained(__magic_name__ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
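# A hedged invocation sketch (script name and paths are placeholders):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path ./generator.ckpt \
#         --stats_path ./stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan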
| 656
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
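# A hedged usage sketch (this builder is what backs the public "csv" loader):
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")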
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : list[list[str]] = [[] for _ in range(__magic_name__ )]
snake_case_ : Optional[int] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(__magic_name__ ) <= key:
return input_string
for position, character in enumerate(__magic_name__ ):
snake_case_ : Optional[int] = position % (lowest * 2) # puts it in bounds
snake_case_ : int = min(__magic_name__ ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__magic_name__ )
snake_case_ : Union[str, Any] = ["".join(__magic_name__ ) for row in temp_grid]
snake_case_ : Optional[int] = "".join(__magic_name__ )
return output_string
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
snake_case_ : Any = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
snake_case_ : list[list[str]] = [[] for _ in range(__magic_name__ )] # generates template
for position in range(len(__magic_name__ ) ):
snake_case_ : int = position % (lowest * 2) # puts it in bounds
snake_case_ : str = min(__magic_name__ ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
snake_case_ : Optional[int] = 0
for row in temp_grid: # fills in the characters
snake_case_ : Any = input_string[counter : counter + len(__magic_name__ )]
grid.append(list(__magic_name__ ) )
counter += len(__magic_name__ )
snake_case_ : Dict = "" # reads as zigzag
for position in range(len(__magic_name__ ) ):
snake_case_ : int = position % (lowest * 2) # puts it in bounds
snake_case_ : Union[str, Any] = min(__magic_name__ ,lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __UpperCAmelCase ( __magic_name__ )-> dict[int, str]:
"""simple docstring"""
snake_case_ : int = {}
for key_guess in range(1 ,len(__magic_name__ ) ): # tries every key
snake_case_ : Union[str, Any] = decrypt(__magic_name__ ,__magic_name__ )
return results
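# A self-contained sketch of the same zigzag traversal (hypothetical name
# `_demo_rail_fence_encrypt`, duplicated here because the defs above share one
# mangled name; assumes key >= 2). With key 3, "WEAREDISCOVEREDFLEEATONCE"
# encrypts to "WECRLTEERDSOEEFEAOCAIVDEN".
def _demo_rail_fence_encrypt(text: str, key: int) -> str:
    rows: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    for position, character in enumerate(text):
        num = position % (lowest * 2)  # position within one up-down cycle
        rows[min(num, lowest * 2 - num)].append(character)  # fold into a zigzag
    return "".join("".join(row) for row in rows)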
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
# TODO: upload to AWS
__lowerCamelCase : Tuple = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''retribert'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=30_522 , lowerCAmelCase__ :Any=768 , lowerCAmelCase__ :Tuple=8 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Any=3_072 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Any=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Tuple=0.0_2 , lowerCAmelCase__ :List[Any]=1E-1_2 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Dict=128 , lowerCAmelCase__ :Optional[int]=0 , **lowerCAmelCase__ :List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = vocab_size
snake_case_ : str = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : Optional[Any] = hidden_act
snake_case_ : str = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : List[str] = share_encoders
snake_case_ : Dict = projection_dim
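# A hedged instantiation sketch (the upstream class is RetriBertConfig; the name
# above is mangled to A_):
#
#     config = RetriBertConfig(hidden_size=256, num_attention_heads=4, projection_dim=64)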
| 656
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
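# A hedged usage sketch (assumed upstream names `similarity_search` and
# `cosine_similarity`; the defs above are name-mangled, so this is illustrative):
#
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.1, 0.1]])
#     similarity_search(dataset, value_array)    # [[[0.0, 0.0], 0.14142...]]
#     cosine_similarity(np.ones(3), np.ones(3))  # 1.0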
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656
| 1
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowerCamelCase : Union[str, Any] = ['''bert-base-uncased''', '''bert-base-cased''']
__lowerCamelCase : Optional[int] = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class A_ (tf.keras.Model ):
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :Optional[int] ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : int = tokenizer
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = TFAutoModel.from_config(lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer(lowerCAmelCase__ )
snake_case_ : Tuple = self.bert(**lowerCAmelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class A_ (unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        """simple docstring"""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_export_for_inference(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
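# Minimal in-graph tokenization sketch, assuming the same TFBertTokenizer API that
# the tests above exercise; the checkpoint and sentence are illustrative only.
if __name__ == "__main__" and is_tf_available() and is_tensorflow_text_available():
    demo_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")

    @tf.function
    def demo_tokenize(texts):
        # Tokenization runs inside the TF graph, so it can be exported with a model.
        return demo_tokenizer(texts)

    print(demo_tokenize(tf.constant(["A short test sentence."])))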
| 656
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
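# Typical invocation sketch (script and file names are hypothetical; kwargs after
# the two paths are forwarded to `calculate_rouge`):
#
#   python calculate_rouge_path.py preds.txt refs.txt --save_path rouge.json
#
# or programmatically:
#
#   metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")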
| 656
| 1
|
'''simple docstring'''
def kth_permutation(k, n):
    """simple docstring"""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
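# Worked example: with n = 3 the factorial base is [1, 2], so k = 4 decodes via
# divmod(4, 2) -> (2, 0) and divmod(0, 1) -> (0, 0), selecting [2, 0, 1].
# These sanity checks were verified by hand and are illustrative additions.
assert kth_permutation(0, 3) == [0, 1, 2]
assert kth_permutation(4, 3) == [2, 0, 1]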
| 656
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """simple docstring"""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
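# Toy illustration of the key-renaming step above on a fake state dict; the
# tensor is a placeholder, not a real checkpoint entry:
#
#   toy_state_dict = {"input_proj.weight": torch.zeros(1)}
#   rename_key(toy_state_dict, "input_proj.weight", "input_projection.weight")
#   assert "input_projection.weight" in toy_state_dict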
| 656
| 1
|
'''simple docstring'''
def one_pence() -> int:
    """simple docstring"""
    return 1


def two_pence(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n=200) -> int:
    """simple docstring"""
    return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
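# Hand-verified sanity checks for the recursion above (illustrative additions):
# 5p can be formed 4 ways: {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1}.
assert solution(1) == 1
assert solution(2) == 2
assert solution(5) == 4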
| 656
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        """simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        """simple docstring"""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
        """simple docstring"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
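# Minimal img2img sketch mirroring the integration test above; it needs a CUDA
# GPU and downloads the BAAI/AltDiffusion weights. `init_image` stands for any
# PIL image, e.g. loaded with diffusers.utils.load_image.
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   pipe.to("cuda")
#   image = pipe(
#       prompt="A fantasy landscape, trending on artstation",
#       image=init_image,
#       strength=0.75,
#       guidance_scale=7.5,
#       generator=torch.manual_seed(0),
#       output_type="np",
#   ).images[0]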
| 656
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,  # TP rank used when training with megatron
        slow_but_exact=False,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.12")
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
@property
    def num_layers(self) -> int:
        """simple docstring"""
        return self._config.n_layer
@property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.n_head
@property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
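# Sketch of driving the ONNX config above; model sizes and the tokenizer
# checkpoint are illustrative, and `BloomOnnxConfig` is assumed to live in this
# module.
#
#   from transformers import AutoTokenizer, TensorType
#   config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#   onnx_config = BloomOnnxConfig(config, use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   # -> input_ids, past_key_values pairs shaped (batch * n_head, ...), attention_mask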
| 656
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        """simple docstring"""
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """simple docstring"""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        """simple docstring"""
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        """simple docstring"""
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
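# Minimal zero-shot usage sketch matching the pipelines exercised above; model
# and labels are illustrative.
#
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   result = classifier(
#       "Who are you voting for in 2020?",
#       candidate_labels=["politics", "public health", "science"],
#   )
#   # result["labels"] is sorted by descending score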
| 656
| 1
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    """simple docstring"""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """simple docstring"""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = F.to_tensor(__magic_name__ )
snake_case_ : List[Any] = F.normalize(__magic_name__ ,mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] )
return image
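# Together, resize + normalize reproduce the model's expected preprocessing: the
# longest image side is scaled to 800 (detection) or 1000 (structure) pixels, then
# ImageNet mean/std normalization is applied. Illustrative use (file name assumed):
# image = Image.open("page.png").convert("RGB")
# pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)  # (1, 3, H, W)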
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
logger.info("Converting model..." )
# load original state dict
snake_case_ : str = torch.hub.load_state_dict_from_url(__magic_name__ ,map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Dict = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : Tuple = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : str = state_dict.pop(__magic_name__ )
snake_case_ : int = val
# create HuggingFace model and load state dict
snake_case_ : List[str] = TableTransformerConfig(
backbone="resnet18" ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,)
if "detection" in checkpoint_url:
snake_case_ : str = 15
snake_case_ : Tuple = 2
snake_case_ : Any = {0: "table", 1: "table rotated"}
snake_case_ : List[str] = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
snake_case_ : int = 125
snake_case_ : List[Any] = 6
snake_case_ : str = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
snake_case_ : Any = idalabel
snake_case_ : int = {v: k for k, v in idalabel.items()}
snake_case_ : List[str] = DetrImageProcessor(
format="coco_detection" ,max_size=800 if "detection" in checkpoint_url else 1000 )
snake_case_ : Optional[Any] = TableTransformerForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify our conversion
snake_case_ : Dict = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
snake_case_ : List[str] = hf_hub_download(repo_id="nielsr/example-pdf" ,repo_type="dataset" ,filename=__magic_name__ )
snake_case_ : Any = Image.open(__magic_name__ ).convert("RGB" )
snake_case_ : Optional[int] = normalize(resize(__magic_name__ ,__magic_name__ ) ).unsqueeze(0 )
snake_case_ : Optional[Any] = model(__magic_name__ )
if "detection" in checkpoint_url:
snake_case_ : Optional[Any] = (1, 15, 3)
snake_case_ : Union[str, Any] = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
snake_case_ : int = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
snake_case_ : Any = (1, 125, 7)
snake_case_ : Dict = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
snake_case_ : Optional[Any] = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
snake_case_ : str = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__magic_name__ )
image_processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase : Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
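    # Illustrative invocation (script name and output path are assumptions):
    # python convert_table_transformer_checkpoint.py \
    #     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #     --pytorch_dump_folder_path ./table-transformer-detection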
| 656
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        snake_case_ : List[str] = roberta_layer.fc1.weight
        snake_case_ : List[Any] = roberta_layer.fc1.bias
# output
snake_case_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        snake_case_ : Any = roberta_layer.fc2.weight
        snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 656
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
| 656
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
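# A hedged usage sketch (illustrative, not from the original module): define the
# training function in the notebook and hand it to the launcher above (called
# notebook_launcher upstream). The Accelerator must be created *inside* that function,
# as the checks above enforce; the argument values below are assumptions.
# def training_loop(mixed_precision="no"):
#     ...  # build Accelerator() and run the training loop here
# notebook_launcher(training_loop, args=("fp16",), num_processes=2)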
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
| 656
| 1
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
if save_path is not None:
save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
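    # Illustrative CLI usage via fire (file names are assumptions):
    # python rouge_cli.py summaries.txt references.txt --save_path=rouge_scores.json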
| 656
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> set:
"""simple docstring"""
snake_case_ : int = set()
    # edges = set of the graph's edges
    snake_case_ : Optional[Any] = get_edges(__magic_name__ )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
while edges:
snake_case_, snake_case_ : List[Any] = edges.pop()
chosen_vertices.add(__magic_name__ )
chosen_vertices.add(__magic_name__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__magic_name__ )
return chosen_vertices
def __UpperCAmelCase ( __magic_name__ )-> set:
"""simple docstring"""
snake_case_ : Optional[Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 656
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
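# check_uniques keeps only the first occurrence of each hash: the hash is removed from
# `uniques` the first time it is seen, so any later example carrying the same hash is
# rejected. Toy illustration (values are assumptions):
# uniques = {"abc"}
# check_uniques({"hash": "abc"}, uniques)  # True; "abc" is removed from uniques
# check_uniques({"hash": "abc"}, uniques)  # False; treated as a duplicate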
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ = "The quick brown fox jumps over the lazy dog" ,)-> bool:
"""simple docstring"""
snake_case_ : Any = set()
# Replace all the whitespace in our sentence
snake_case_ : Any = input_str.replace(" " ,"" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__magic_name__ ) == 26
def __UpperCAmelCase ( __magic_name__ = "The quick brown fox jumps over the lazy dog" ,)-> bool:
"""simple docstring"""
snake_case_ : int = [False] * 26
for char in input_str:
if char.islower():
snake_case_ : Union[str, Any] = True
elif char.isupper():
snake_case_ : str = True
return all(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = "The quick brown fox jumps over the lazy dog" ,)-> bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
from timeit import timeit
snake_case_ : Any = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" ,setup=__magic_name__ ) )
print(timeit("is_pangram_faster()" ,setup=__magic_name__ ) )
print(timeit("is_pangram_fastest()" ,setup=__magic_name__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 656
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__lowerCamelCase : Optional[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__lowerCamelCase : str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__lowerCamelCase : Dict = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score (integers, or floats for the stsb subset).
    references: list of references, one per prediction (integers, or floats for stsb).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = simple_accuracy(__magic_name__ ,__magic_name__ )
    snake_case_ : Union[str, Any] = float(f1_score(y_true=__magic_name__ ,y_pred=__magic_name__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : List[str] = float(pearsonr(__magic_name__ ,__magic_name__ )[0] )
snake_case_ : Tuple = float(spearmanr(__magic_name__ ,__magic_name__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 656
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
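# Quick illustration of the checkpoint regex (the docstring snippet is hypothetical):
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]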
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656
| 1
|
"""Transformer-XL configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL does not have a fixed maximum sequence length.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
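# A minimal sketch of how this configuration is typically used; it assumes the
# `transformers` package is installed, and the overrides below are illustrative:
from transformers import TransfoXLConfig

config = TransfoXLConfig(d_model=512, n_layer=6)
print(config.hidden_size)  # 512, resolved through `attribute_map` to `d_model`
print(config.max_position_embeddings)  # -1, no fixed sequence length limit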
| 656
|
"""CvT model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
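# A minimal sketch of how this configuration is typically used; it assumes the
# `transformers` package is installed, and the override below is illustrative:
from transformers import CvtConfig

config = CvtConfig(drop_path_rate=[0.0, 0.0, 0.2])
print(config.depth)  # [1, 2, 10], the number of transformer blocks per stage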
| 656
| 1