| Column | Type | Values |
|---|---|---|
| code | string | lengths 87–55.2k |
| code_codestyle | int64 | 0–349 |
| style_context | string | lengths 135–49.1k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
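# Sanity-check sketch, assuming the functions above are importable as-is.
# Euler's theorem for connected undirected graphs: all-even degrees <=> Euler
# circuit (return code 1), exactly two odd-degree vertices <=> Euler path
# (code 2), anything else <=> neither (code 3).
triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}  # every vertex has even degree
check, odd_node = check_circuit_or_path(triangle, 10)
assert check == 1

path_graph = {1: [2, 3], 2: [1], 3: [1]}  # vertices 2 and 3 have odd degree
check, odd_node = check_circuit_or_path(path_graph, 10)
assert check == 2 and odd_node in (2, 3)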
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
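# Example invocation (sketch; the script filename is assumed, the flags match
# the argparse definition above):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50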
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for the base model; it also helps speed up the tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct, as Boruvka's algorithm requires."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given sets of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        """Disjoint-set (union-find) structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns the minimum spanning tree computed with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
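# Minimal usage sketch, assuming the Graph class above is importable: build a
# weighted triangle (weights must be distinct for Boruvka) and extract its MST.
g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
mst = Graph.boruvka_mst(g)
print(mst)  # the two lightest edges (weights 1 and 2) span all three vertices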
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
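# Effect of the indirection above (sketch, assuming transformers and torch are
# installed): replacing the module in sys.modules with a _LazyModule defers the
# heavy torch-backed imports until an attribute is first accessed.
from transformers import XLMRobertaXLConfig  # resolved lazily via _LazyModule
config = XLMRobertaXLConfig()  # only the configuration module is imported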
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values: returns 1 if either input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Yahoo Finance renders the quote inside a span under this CSS class.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old attention packs q/k/v into a single in_proj matrix; split it into thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch also carries its row/column index, hence the "+ 2".
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode a header text is required, so a bare call must raise.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # The extra channel is dropped by do_convert_rgb, hence "num_channels - 1".
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int = None , _UpperCAmelCase : int = None ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = pad_token_id
lowercase__ = max_length
lowercase__ = vocab
lowercase__ = merges
lowercase__ = BytePairTokenizer(_UpperCAmelCase , _UpperCAmelCase , sequence_length=_UpperCAmelCase )
@classmethod
    def from_tokenizer(cls : Optional[int] , tokenizer : GPTaTokenizer , *args : List[Any] , **kwargs : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained(cls : Union[str, Any] , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs : str , **kwargs : List[Any] ) -> Any:
        """simple docstring"""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config(cls : Any , config : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        return cls(**config )
    def get_config(self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self : str , x : Optional[Any] , max_length : int = None ) -> Optional[Any]:
        """simple docstring"""
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase__ : int = logging.get_logger(__name__)
def make_batched ( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class _UpperCAmelCase ( UpperCAmelCase__):
    model_input_names = ["""pixel_values"""]
    def __init__( self : Optional[int] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , offset : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Any , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : List[Any] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self : Any , image : np.ndarray , scale : Union[int, float] , offset : bool = True , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : List[str] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self : Tuple , image : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , offset : bool = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self : Optional[Any] , videos : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , offset : bool = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Optional[Any] , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'''pixel_values''': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
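    # Once a tensor type is requested, pixel_values comes back with shape
    # (batch_size, num_frames, num_channels, height, width).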
| 264
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
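# The loop above is a sieve of Eratosthenes over the odd numbers: striking out
# multiples of each surviving prime up to sqrt(NUM_PRIMES) leaves only primes.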
@lru_cache(maxsize=100 )
def partition ( number_to_partition : int ) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution ( number_unique_partitions : int = 5000 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
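# partition(n) holds one product of primes per way of writing n as a sum of
# primes, so len(partition(n)) counts the distinct prime partitions of n
# (Project Euler 77: the first n with more than 5000 such partitions is 71).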
if __name__ == "__main__":
print(F'{solution() = }')
| 305
| 0
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : List[Any] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowercase_ ( UpperCAmelCase__ ):
    model_type = 'data2vec-audio'
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def __a ( self ):
return math.prod(self.conv_stride )
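    # math.prod(self.conv_stride) is the total downsampling factor of the
    # feature encoder, i.e. how many raw audio samples collapse into one frame.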
| 80
|
def longest_distance ( graph ):
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
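# This is Kahn's algorithm (BFS over a topological order): long_dist[v] ends up
# holding the number of vertices on the longest path ending at v, so
# max(long_dist) is the length of the longest path in the DAG.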
# Adjacency list of Graph
A : Union[str, Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 305
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
    """simple docstring"""
    def __init__( self : Tuple , parent : Optional[Any] , batch_size : Optional[int]=13 , image_size : Tuple=30 , patch_size : Union[str, Any]=2 , num_channels : Any=3 , is_training : List[Any]=True , use_labels : List[Any]=True , hidden_size : Any=32 , num_hidden_layers : Union[str, Any]=2 , num_attention_heads : Union[str, Any]=4 , intermediate_size : Optional[Any]=37 , hidden_act : Union[str, Any]="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : Optional[Any]=0.1 , type_sequence_label_size : Optional[Any]=10 , initializer_range : List[Any]=0.02 , num_labels : Tuple=3 , mask_ratio : List[Any]=0.6 , scope : Dict=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self : Optional[Any] ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Optional[int] ):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self : Union[str, Any] , config : str , pixel_values : Union[str, Any] , labels : Tuple ):
        '''simple docstring'''
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self : Optional[int] , config : Optional[int] , pixel_values : Tuple , labels : List[Any] ):
        '''simple docstring'''
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self : int ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
snake_case = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
np.random.seed(2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = int((config.image_size // config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , noise=_UpperCAmelCase )
_A = copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_A = model(**_UpperCAmelCase , noise=_UpperCAmelCase )
_A = outputs_dict[0].numpy()
_A = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
np.random.seed(2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = int((config.image_size // config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__UpperCAmelCase : Any ):
_A = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_UpperCAmelCase ):
_A = v.numpy()
else:
_A = np.array(_UpperCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = prepare_numpy_arrays(_UpperCAmelCase )
_A = model(_UpperCAmelCase , noise=_UpperCAmelCase )
_A = model(**_UpperCAmelCase , noise=_UpperCAmelCase )
self.assert_outputs_same(_UpperCAmelCase , _UpperCAmelCase )
    def check_pt_tf_models( self : List[str] , tf_model : str , pt_model : Optional[int] , tf_inputs_dict : Optional[Any] ):
        '''simple docstring'''
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
np.random.seed(2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_UpperCAmelCase )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(_UpperCAmelCase , _UpperCAmelCase ),)
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_UpperCAmelCase , "_keras_serializable" , _UpperCAmelCase )
}
_A = int((config.image_size // config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_A = tf.convert_to_tensor(_UpperCAmelCase )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
_A = main_layer_class(_UpperCAmelCase )
_A = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_A = tf.keras.Model(_UpperCAmelCase , outputs=main_layer(_UpperCAmelCase ) )
_A = model(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(_UpperCAmelCase , "keras_model.h5" )
model.save(_UpperCAmelCase )
_A = tf.keras.models.load_model(
_UpperCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_UpperCAmelCase , tf.keras.Model )
_A = model(_UpperCAmelCase )
self.assert_outputs_same(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
np.random.seed(2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = int((config.image_size // config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , noise=_UpperCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_A = outputs.last_hidden_state.numpy()
_A = 0
else:
_A = outputs.logits.numpy()
_A = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
_A = model_class.from_pretrained(_UpperCAmelCase )
_A = model(_UpperCAmelCase , noise=_UpperCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_A = after_outputs["last_hidden_state"].numpy()
_A = 0
else:
_A = after_outputs["logits"].numpy()
_A = 0
_A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase , 1E-5 )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = int((config.image_size // config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , noise=_UpperCAmelCase )
_A = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_UpperCAmelCase )
_A = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_A = model_class.from_config(model.config )
_A = new_model(_UpperCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
_A = new_model(_UpperCAmelCase , noise=_UpperCAmelCase )
self.assert_outputs_same(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img( ) -> int:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : int ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
np.random.seed(2 )
_A = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_A = ViTMAEConfig()
_A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_A = np.random.uniform(size=(1, num_patches) )
# forward pass
_A = model(**_UpperCAmelCase , noise=_UpperCAmelCase )
# verify the logits
_A = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_A = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1E-4 )
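        # Supplying an explicit `noise` array makes ViTMAE's random masking
        # deterministic, which is what allows comparison against fixed logits.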
| 79
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor ( state : Any ) -> Optional[int]:
    """simple docstring"""
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather ( state : int ) -> Union[str, Any]:
    """simple docstring"""
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object ( state : Optional[int] ) -> Tuple:
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, f'''{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def test_broadcast ( state : str ) -> Dict:
    """simple docstring"""
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes ( state : str ) -> Dict:
    """simple docstring"""
    # The main process holds one extra element so that padding is exercised
    # on every other rank.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum ( state : List[Any] ) -> Optional[int]:
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """sum""" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def test_reduce_mean ( state : Dict ) -> int:
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """mean""" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def _mp_fn ( index : str ) -> int:
    """simple docstring"""
    main()
def main ( ) -> Optional[int]:
    """simple docstring"""
    state = PartialState()
    state.print(f'''State: {state}''' )
    state.print("""testing gather""" )
    test_gather(state )
    state.print("""testing gather_object""" )
    test_gather_object(state )
    state.print("""testing broadcast""" )
    test_broadcast(state )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(state )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(state )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
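# The collective-op tests above only exercise multiple ranks when started with
# a multi-process launcher, e.g. (illustrative command, script name assumed):
#   accelerate launch --num_processes 2 test_ops.py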
| 305
| 0
|
"""simple docstring"""
import torch
from torch import nn
class _SCREAMING_SNAKE_CASE ( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ) -> List[Any]:
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ) -> Tuple:
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ) -> Tuple:
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase_ :List[Any] = hidden[..., :-1, :].contiguous()
lowerCAmelCase_ :Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase_ :int = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase_ :str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase_ :List[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase_ :List[str] = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase_ :str = labels != -100
lowerCAmelCase_ :Optional[Any] = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase_ :int = (
-nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase_ :Optional[int] = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase_ , lowerCAmelCase_ :Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase_ :Dict = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase_ :Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase_ :List[Any] = self.out_layers[i].weight
lowerCAmelCase_ :Dict = self.out_layers[i].bias
if i == 0:
lowerCAmelCase_ :Optional[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase_ :List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase_ :List[Any] = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ :Any = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
if labels is None:
lowerCAmelCase_ :Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase_ :List[Any] = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :List[str] = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase_ :List[Any] = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase_ :str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase_ :Optional[Any] = labels.index_select(0 , _UpperCAmelCase ) - l_idx
lowerCAmelCase_ :List[str] = head_logprob.index_select(0 , _UpperCAmelCase )
lowerCAmelCase_ :Union[str, Any] = hidden.index_select(0 , _UpperCAmelCase )
else:
lowerCAmelCase_ :Tuple = hidden
if i == 0:
if labels is not None:
lowerCAmelCase_ :Tuple = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase_ :Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase_ :Any = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ :str = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
lowerCAmelCase_ :Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase_ :List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase_ :int = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase_ :Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , _UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
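    # Adaptive softmax in a nutshell: frequent "head" tokens get full logits,
    # while rare tokens are scored as P(cluster) * P(token | cluster) -- a sum
    # in log space (head_logprob_i[...] + tail_logprob_i above).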
    def log_prob( self , hidden ) -> str:
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase_ :Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase_ :str = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase_ :Any = self.out_layers[i].weight
lowerCAmelCase_ :Dict = self.out_layers[i].bias
if i == 0:
lowerCAmelCase_ :List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase_ :List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase_ :int = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ :Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase_ :List[str] = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
lowerCAmelCase_ :List[Any] = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase_ :Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase_ :Optional[Any] = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase_ :Union[str, Any] = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
lowerCAmelCase_ :int = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase_ :str = logprob_i
return out
| 84
|
def UpperCamelCase ( column_title : str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
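# Example: "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 28;
# column titles are base-26 numerals whose digits A..Z map to 1..26.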
if __name__ == "__main__":
from doctest import testmod
testmod()
| 305
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : List[Any] = 256
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    _optional_components = ["""melgan"""]
    def __init__( self : Tuple , notes_encoder : SpectrogramNotesEncoder , continuous_encoder : SpectrogramContEncoder , decoder : TaFilmDecoder , scheduler : DDPMScheduler , melgan : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_28
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self : str , features : Union[str, Any] , output_range : Optional[int]=(-1.0, 1.0) , clip : int=False ) -> Optional[int]:
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self : Union[str, Any] , outputs : List[str] , input_range : Optional[Any]=(-1.0, 1.0) , clip : List[str]=False ) -> Optional[Any]:
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self : Any , input_tokens : int , continuous_inputs : int , continuous_mask : Union[str, Any] ) -> Optional[Any]:
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self : List[str] , encodings_and_masks : Optional[int] , input_tokens : Any , noise_time : List[str] ) -> Optional[Any]:
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self : List[str] , input_tokens : List[List[int]] , generator : Optional[torch.Generator] = None , num_inference_steps : int = 1_00 , return_dict : bool = True , output_type : str = "numpy" , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_UpperCAmelCase )}.''' )
__lowerCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__lowerCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
__lowerCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_UpperCAmelCase ):
if i == 0:
__lowerCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__lowerCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__lowerCamelCase = ones
__lowerCamelCase = self.scale_features(
_UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase )
__lowerCamelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__lowerCamelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__lowerCamelCase = self.decode(
encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
__lowerCamelCase = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] )
__lowerCamelCase = mel[:1]
__lowerCamelCase = mel.cpu().float().numpy()
__lowerCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase )
logger.info('''Generated segment''' , _UpperCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__lowerCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowerCamelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
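# scale_features maps raw mel features from [min_value, max_value] into the
# model's working range [-1, 1]; scale_to_features inverts that mapping before
# the (optional) MelGAN vocoder turns the spectrogram into audio.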
| 270
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction ( train_dt : list , train_usr : list , train_mtch : list , test_dt : list , test_mtch : list ) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor ( train_user : list , train_match : list , test_match : list ) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="""nm""" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor ( x_train : list , x_test : list , train_user : list ) -> float:
    """simple docstring"""
    regressor = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker ( train_user : list ) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker ( list_vote : list , actual_result : float ) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
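# Majority vote over the three forecasts: predictions within 0.1 of the actual
# normalized value count as safe, overshooting predictions count as not safe.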
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = '' if data_safety_checker(res_vote, tst_user) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 305
| 0
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
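# Illustrative invocation (all paths are placeholders):
#   python convert_t5.py --tf_checkpoint_path /path/to/tf_ckpt \
#       --config_file t5_config.json --pytorch_dump_path ./t5_pytorch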
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 275
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file ( tmp_path : Optional[Any] ) -> List[Any]:
    """simple docstring"""
    filename = tmp_path / """file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file ( tmp_path : str ) -> Tuple:
    """simple docstring"""
    filename = tmp_path / """malformed_file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image ( tmp_path : List[Any] , image_file : List[str] ) -> str:
    """simple docstring"""
    filename = tmp_path / """csv_with_image.csv"""
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label ( tmp_path : Tuple ) -> Union[str, Any]:
    """simple docstring"""
    filename = tmp_path / """csv_with_label.csv"""
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list ( tmp_path : Dict ) -> Union[str, Any]:
    """simple docstring"""
    filename = tmp_path / """csv_with_int_list.csv"""
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv ( csv_file : Tuple , malformed_csv_file : Tuple , caplog : Tuple ) -> Optional[Any]:
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="""Error tokenizing data""" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image) -> None:
    """simple docstring"""
    with open(csv_file_with_image , encoding="""utf-8""" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""image""" ).type == Image()()
    generated_content = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label) -> None:
    """simple docstring"""
    with open(csv_file_with_label , encoding="""utf-8""" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
    generated_content = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list) -> None:
    """simple docstring"""
    csv = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
    generated_content = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
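# To run just these tests (assuming the usual datasets repo layout; the path
# below is illustrative):
#   pytest tests/packaged_modules/test_csv.py -q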
| 305
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
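# In the mapping tables above, "*" stands for the layer index. For example
# (an illustrative key, not one taken from a real checkpoint), the fairseq key
# "encoder.layers.3.fc1" is rewritten to
# "speecht5.encoder.wrapped_encoder.layers.3.feed_forward.intermediate_dense"
# by recursively_load_weights below.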
def set_recursively(hf_pointer , key , value , full_name , weight_type):
    # Walk down the attribute path (e.g. "encoder.layers.0.fc1") to the target module
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore(name , ignore_keys):
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
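# A quick illustration of the three pattern forms handled above (made-up keys):
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                          -> True  (plain substring)
#   should_ignore("text_encoder_prenet.emb.weight", ["text_encoder_prenet.*"])      -> True  (trailing wildcard)
#   should_ignore("encoder.layers.2.norm_k.bias", ["encoder.layers.*.norm_k.bias"]) -> True  (infix wildcard)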
def recursively_load_weights(fairseq_dict , hf_model , task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint(task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["""model"""] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
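# Example invocation (placeholder paths; the exact checkpoint and vocab file
# names depend on the original fairseq SpeechT5 release):
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s \
#       --checkpoint_path /path/to/speecht5_tts.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path /path/to/output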
| 335
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
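# With this lazy layout, importing the package stays cheap: the torch-backed
# submodule is only loaded when one of its attributes is first accessed.
# A sketch of the intended behaviour (illustrative imports):
#   from transformers.models.dpt import DPTConfig  # does not import torch code
#   from transformers.models.dpt import DPTModel   # triggers the modeling_dpt import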
| 305
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    '''simple docstring'''
    x = re.sub('''<n>''', '''''', x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 72
|
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float] , nums_b: list[float]) -> float:
    """simple docstring"""
    all_numbers = sorted(nums_a + nums_b )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
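# Worked example: merging [1.0, 3.0] and [2.0, 4.0] gives [1.0, 2.0, 3.0, 4.0];
# the combined length is even, so the median is (2.0 + 3.0) / 2 = 2.5.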
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
| 305
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def test_inspect_dataset(path , tmp_path) -> None:
    """simple docstring"""
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def test_inspect_metric(path , tmp_path) -> None:
    """simple docstring"""
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_config_info(path , config_name , expected_splits) -> None:
    """simple docstring"""
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path , config_name , expected_exception) -> None:
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def test_get_dataset_config_names(path , expected) -> None:
    """simple docstring"""
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_info(path , expected_configs , expected_splits_in_first_config) -> None:
    """simple docstring"""
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_split_names(path , expected_config , expected_splits) -> None:
    """simple docstring"""
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path , config_name , expected_exception) -> None:
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
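# These are integration tests (they reach out to the Hugging Face Hub); the
# module-level `pytestmark` above lets you select or deselect them, e.g.:
#   pytest -m integration tests/test_inspect.py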
| 235
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]] , vert: int , visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components(reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
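# Example with the sample graphs defined at the top of this file:
#   strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
# (0 -> 2 -> 1 -> 0 forms a cycle, while 3 and 4 are singleton components.)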
| 305
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , """num_attention_heads""" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=1_3 , lowerCAmelCase__ : Dict=6_4 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : Tuple=[1_2_8, 2_5_6, 3_8_4] , lowerCAmelCase__ : int=[4, 6, 8] , lowerCAmelCase__ : List[str]=[2, 3, 4] , lowerCAmelCase__ : List[Any]=[1_6, 1_6, 1_6] , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : Optional[Any]=[2, 2, 2] , lowerCAmelCase__ : Optional[Any]=[2, 2, 2] , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[Any]=2 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
__SCREAMING_SNAKE_CASE : Tuple = batch_size
__SCREAMING_SNAKE_CASE : Any = image_size
__SCREAMING_SNAKE_CASE : str = num_channels
__SCREAMING_SNAKE_CASE : str = kernel_size
__SCREAMING_SNAKE_CASE : Tuple = stride
__SCREAMING_SNAKE_CASE : str = padding
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_sizes
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = depths
__SCREAMING_SNAKE_CASE : Tuple = key_dim
__SCREAMING_SNAKE_CASE : int = drop_path_rate
__SCREAMING_SNAKE_CASE : List[Any] = patch_size
__SCREAMING_SNAKE_CASE : int = attention_ratio
__SCREAMING_SNAKE_CASE : Tuple = mlp_ratio
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
__SCREAMING_SNAKE_CASE : Dict = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : int = use_labels
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Tuple = initializer_range
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Any = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = image_size[0], image_size[1]
for _ in range(4 ):
__SCREAMING_SNAKE_CASE : int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__SCREAMING_SNAKE_CASE : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
__SCREAMING_SNAKE_CASE : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_A : int = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[Any] = False
_A : Dict = False
_A : Dict = False
_A : List[str] = False
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = LevitModelTester(self )
__SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=3_7 )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ):
__SCREAMING_SNAKE_CASE : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE : Any = outputs.hidden_states
__SCREAMING_SNAKE_CASE : str = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = (self.model_tester.image_size, self.model_tester.image_size)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
__SCREAMING_SNAKE_CASE : Any = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__SCREAMING_SNAKE_CASE : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : List[str] = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
__SCREAMING_SNAKE_CASE : str = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[int] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__SCREAMING_SNAKE_CASE : List[str] = problem_type["""title"""]
__SCREAMING_SNAKE_CASE : List[str] = problem_type["""num_labels"""]
__SCREAMING_SNAKE_CASE : Any = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
__SCREAMING_SNAKE_CASE : List[str] = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
__SCREAMING_SNAKE_CASE : Any = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = model(**_UpperCAmelCase )
# verify the logits
__SCREAMING_SNAKE_CASE : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 112
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = StableDiffusionDiffEditPipeline
A__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ = frozenset([] )
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
lowercase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
lowercase__ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_zero=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase__ = CLIPTextModel(_UpperCAmelCase )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=0 ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor((1, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe(**_UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__ = self.pipeline_class.from_pretrained(_UpperCAmelCase )
pipe_loaded.to(_UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=_UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_UpperCAmelCase , _UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe_loaded(**_UpperCAmelCase )[0]
lowercase__ = np.abs(output - output_loaded ).max()
self.assertLess(_UpperCAmelCase , 1E-4 )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_mask_inputs(_UpperCAmelCase )
lowercase__ = pipe.generate_mask(**_UpperCAmelCase )
lowercase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ = np.array([0] * 9 )
lowercase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase__ = DPMSolverMultistepScheduler(**_UpperCAmelCase )
lowercase__ = DPMSolverMultistepInverseScheduler(**_UpperCAmelCase )
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCamelCase__ (cls : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase__ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase__ = raw_image
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase , num_inference_steps=25 , ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 305
| 0
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Dict = 42
__magic_name__: Optional[Any] = 42
__magic_name__: List[str] = 42
__magic_name__: int = 42
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = 42
__magic_name__: str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: List[str] = False
__magic_name__: Optional[Any] = True
__magic_name__: Optional[Any] = None
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
__magic_name__: List[str] = "titi"
__magic_name__: Any = "toto"
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
__magic_name__: List[str] = "titi"
__magic_name__: List[str] = "toto"
__magic_name__: Tuple = 42
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Any = "toto"
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Union[str, Any] = "toto"
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
snake_case_ : str = MixedTypeEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Optional[Any] = None
__magic_name__: Union[str, Any] = field(default=UpperCAmelCase__ , metadata={"help": "help message"} )
__magic_name__: Optional[Any] = None
__magic_name__: Dict = list_field(default=[] )
__magic_name__: List[str] = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: List[Any] = list_field(default=[] )
__magic_name__: Any = list_field(default=[1, 2, 3] )
__magic_name__: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
__magic_name__: Union[str, Any] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field()
__magic_name__: Dict = field()
__magic_name__: int = field()
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Dict = BasicEnum(self.required_enum )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Optional[Any] = 42
__magic_name__: Optional[Any] = field()
__magic_name__: Any = None
__magic_name__: List[str] = field(default="toto" , metadata={"help": "help message"} )
__magic_name__: Optional[Any] = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Optional[Any] = False
__magic_name__: List[str] = True
__magic_name__: Tuple = None
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: List[Any] = None
__magic_name__: Optional[int] = field(default=UpperCAmelCase__ , metadata={"help": "help message"} )
__magic_name__: str = None
__magic_name__: str = list_field(default=[] )
__magic_name__: List[str] = list_field(default=[] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Dict , _A : argparse.ArgumentParser , _A : argparse.ArgumentParser ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ : Tuple = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'}
snake_case_ : Any = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , _UpperCAmelCase ) and yy.get('choices' , _UpperCAmelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](_UpperCAmelCase ) , yy['type'](_UpperCAmelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Union[str, Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((snake_case_ ) , ) : Dict = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase )
self.assertFalse(example.flag )
def UpperCAmelCase_ ( self : List[str] ) -> int:
"""simple docstring"""
snake_case_ : Any = HfArgumentParser(_UpperCAmelCase )
snake_case_ : str = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase )
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz' )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase )
snake_case_ : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase )
for dataclass_type in dataclass_types:
snake_case_ : int = HfArgumentParser(_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
snake_case_ : str = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
snake_case_ : int = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
snake_case_ : str = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
snake_case_ : int = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
"""simple docstring"""
snake_case_ : int = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : List[str] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
snake_case_ : Dict = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ : Dict = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
snake_case_ : Tuple = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ : List[Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
snake_case_ : Tuple = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: Dict = "toto"
snake_case_ : Optional[int] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : int = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
snake_case_ : Optional[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
snake_case_ : Optional[int] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ : Union[str, Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : str = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Tuple = parser.parse_args([] )
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ : List[Any] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self : str ) -> int:
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message' )
expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase )
expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase )
snake_case_ : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase )
for dataclass_type in dataclass_types:
snake_case_ : Tuple = HfArgumentParser(_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) )
snake_case_ : Dict = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.1_4 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Tuple = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase )
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Tuple ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Tuple = {
'foo': 12,
'bar': 3.1_4,
'baz': '42',
'flag': True,
}
snake_case_ : Any = parser.parse_dict(_UpperCAmelCase )[0]
snake_case_ : List[Any] = BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ : Tuple = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Optional[Any] = {
'foo': 12,
'bar': 3.1_4,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Any = {
'foo': 12,
'bar': 3.1_4,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Tuple = os.path.join(_UpperCAmelCase , 'temp_json' )
os.mkdir(_UpperCAmelCase )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
snake_case_ : Tuple = BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ : str = HfArgumentParser(_UpperCAmelCase )
snake_case_ : int = {
'foo': 12,
'bar': 3.1_4,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = os.path.join(_UpperCAmelCase , 'temp_yaml' )
os.mkdir(_UpperCAmelCase )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : int = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
snake_case_ : str = BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case_ : Any = HfArgumentParser(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """simple docstring"""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """simple docstring"""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """simple docstring"""
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    """simple docstring"""
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
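    # A quick self-check (an added sketch, not part of the original script): compare
    # strassen() with a naive triple-loop product. strassen() pads its argument
    # lists in place, so fresh copies are passed.
    def naive_multiply(a: list, b: list) -> list:
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    lhs = [[1, 2, 3, 4], [5, 6, 7, 8]]
    rhs = [[1, 0], [0, 1], [1, 0], [0, 1]]
    expected = naive_multiply(lhs, rhs)
    result = strassen([row[:] for row in lhs], [row[:] for row in rhs])
    assert result == expected, (result, expected)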
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = '▁'
lowercase__ : Optional[int] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowercase__ : str = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowercase__ : Any = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowercase__ : Dict = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowercase__ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class _UpperCAmelCase ( UpperCAmelCase__):
_lowerCAmelCase : int = VOCAB_FILES_NAMES
_lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : str = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
_lowerCAmelCase : List[str] = []
def __init__( self : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Optional[int]="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Optional[Any]="<pad>" , lowercase_ : int="<unk>" , lowercase_ : Any=False , lowercase_ : Dict=False , lowercase_ : List[str]=None , lowercase_ : List[str]=None , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : List[Any] , ):
snake_case_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
snake_case_ : str = do_upper_case
snake_case_ : Dict = do_lower_case
snake_case_ : str = load_json(_UpperCAmelCase )
snake_case_ : str = {v: k for k, v in self.encoder.items()}
snake_case_ : Union[str, Any] = spm_file
snake_case_ : Dict = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
snake_case_ : str = lang_codes
snake_case_ : Optional[int] = LANGUAGES[lang_codes]
snake_case_ : List[Any] = [f"<lang:{lang}>" for lang in self.langs]
snake_case_ : Tuple = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
snake_case_ : Union[str, Any] = self.lang_tokens
snake_case_ : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
snake_case_ : str = {}
@property
def _snake_case ( self : str ):
return len(self.encoder )
@property
def _snake_case ( self : List[Any] ):
return self._tgt_lang
@tgt_lang.setter
def _snake_case ( self : Any , lowercase_ : int ):
snake_case_ : Optional[int] = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def _snake_case ( self : str , lowercase_ : str ):
snake_case_ : int = self.lang_code_to_id[tgt_lang]
snake_case_ : str = [lang_code_id]
def _snake_case ( self : Tuple , lowercase_ : str ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def _snake_case ( self : Tuple , lowercase_ : int ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def _snake_case ( self : Optional[int] , lowercase_ : int ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def _snake_case ( self : Optional[Any] , lowercase_ : List[str] ):
snake_case_ : Any = []
snake_case_ : Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
snake_case_ : Any = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
snake_case_ : Dict = []
else:
current_sub_tokens.append(_UpperCAmelCase )
snake_case_ : int = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _snake_case ( self : Dict , lowercase_ : List[Any] , lowercase_ : str=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
snake_case_ : int = [1] * len(self.prefix_tokens )
snake_case_ : int = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def _snake_case ( self : List[str] ):
snake_case_ : Any = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
snake_case_ : Optional[Any] = self.__dict__.copy()
snake_case_ : Tuple = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Dict ):
snake_case_ : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ : Optional[int] = {}
snake_case_ : Dict = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self : Any , lowercase_ : str , lowercase_ : Optional[str] = None ):
snake_case_ : Any = Path(_UpperCAmelCase )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
snake_case_ : Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
snake_case_ : List[str] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
snake_case_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
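# A minimal round-trip sketch for the two JSON helpers above (the path is a
# hypothetical example, not part of the original module):
#
#     save_json({"a": 1}, "/tmp/vocab.json")
#     assert load_json("/tmp/vocab.json") == {"a": 1}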
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ (self : Tuple ) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = True
A__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaxBertModelTester(self )
@slow
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaxBertModel.from_pretrained("""bert-base-cased""" )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
a__ : List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
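# A minimal usage sketch (an added illustration; `output_dir` is the only required
# TrainingArguments field):
#
#     args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True,
#                                     generation_max_length=128, generation_num_beams=4)
#     assert args.to_dict()["generation_max_length"] == 128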
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else '1')
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
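    # Quick self-check (an added sketch): H_4 = 1 + 1/2 + 1/3 + 1/4, so four
    # fraction strings are expected for n = 4.
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]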
'''simple docstring'''
import torch
def main() -> None:
    '''simple docstring'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs')


if __name__ == "__main__":
    main()
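# A small extension sketch (an added note; torch.cuda.get_device_name is a standard
# torch API, shown here only as an illustration):
#
#     for i in range(torch.cuda.device_count()):
#         print(f"GPU {i}: {torch.cuda.get_device_name(i)}")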
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = ShapEImgaImgPipeline
A__ = ['''image''']
A__ = ['''image''']
A__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def lowerCamelCase__ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return 32
@property
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
return 32
@property
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase__ (self : List[Any] ) -> Any:
"""simple docstring"""
return 8
@property
def lowerCamelCase__ (self : int ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(_UpperCAmelCase )
return model
@property
def lowerCamelCase__ (self : Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase__ (self : int ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
lowercase__ = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**_UpperCAmelCase )
return model
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
lowercase__ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = torch_device == """cpu"""
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowerCamelCase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
lowercase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
lowercase__ = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
lowercase__ = pipe(
_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')
        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')
        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
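# Hypothetical command-line usage (an added note; the script name and both paths are
# placeholders, not part of the original file):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./encoder.pkl \
#         --pytorch_dump_folder_path ./flava-codebook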
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}


class FalconConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=6_5024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """simple docstring"""
        return not self.alibi
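# A minimal usage sketch (an added illustration, not part of the original file):
#
#     config = FalconConfig(hidden_size=128, num_attention_heads=4)
#     assert config.head_dim == 32
#     assert config.rotary == (not config.alibi)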
from collections.abc import Callable
class Heap:
    def __init__(self, key: Callable | None = None) -> None:
        '''simple docstring'''
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        '''simple docstring'''
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        '''simple docstring'''
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        '''simple docstring'''
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        '''simple docstring'''
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        '''simple docstring'''
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        '''simple docstring'''
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        '''simple docstring'''
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        '''simple docstring'''
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        '''simple docstring'''
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        '''simple docstring'''
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        '''simple docstring'''
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
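

def _demo_heap() -> None:
    # A minimal usage sketch (an added helper, not in the original file). With the
    # default key the largest key ends up on top, i.e. the heap behaves as a max-heap.
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    assert h.extract_top() == [7, 37]
    assert h.get_top() == [5, 34]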
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import random
class a :
"""simple docstring"""
@staticmethod
def UpperCamelCase ( UpperCamelCase: str ):
"""simple docstring"""
A__ = [ord(_UpperCAmelCase ) for i in text]
A__ = []
A__ = []
for i in plain:
A__ = random.randint(1 , 3_00 )
A__ = (i + k) * k
cipher.append(_UpperCAmelCase )
key.append(_UpperCAmelCase )
return cipher, key
@staticmethod
def UpperCamelCase ( UpperCamelCase: list[int] , UpperCamelCase: list[int] ):
"""simple docstring"""
A__ = []
for i in range(len(_UpperCAmelCase ) ):
A__ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_UpperCAmelCase ) )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[str] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
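
    # Added round-trip sketch: decrypt inverts (i + k) * k exactly, so any
    # text should survive an encrypt/decrypt cycle.
    sample_cipher, sample_key = Onepad().encrypt("round trip")
    assert Onepad().decrypt(sample_cipher, sample_key) == "round trip"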
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Toy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
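
    def test_evaluate_chained_assign(self):
        # Added sketch: chained name assignments should thread through state,
        # mirroring the single-assignment cases above.
        code = "y = x\nz = y"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3, "z": 3})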
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
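

# Added usage sketch (comment-only, since the relative imports above only
# resolve inside the transformers package); `hidden_size` is aliased to
# `d_model` through `attribute_map`:
#
#     config = TrOCRConfig(d_model=256, decoder_layers=2)
#     assert config.hidden_size == 256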
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge between head and tail."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Nudge duplicate edge weights so that every edge weight is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of the graph with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # For each component, track the cheapest edge leaving it.
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
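

if __name__ == "__main__":
    # Added usage sketch: build a small weighted graph, make the edge weights
    # distinct (Boruvka assumes no ties when picking cheapest edges), then
    # print the resulting minimum spanning tree.
    g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))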
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = inspect.getfile(accelerate.test_utils )
UpperCAmelCase__ : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
UpperCAmelCase__ : List[Any] = ["accelerate", "launch"]
UpperCAmelCase__ : Union[str, Any] = Path.home() / ".cache/huggingface/accelerate"
UpperCAmelCase__ : Optional[int] = "default_config.yaml"
UpperCAmelCase__ : List[str] = config_folder / config_file
UpperCAmelCase__ : Optional[int] = config_folder / "_default_config.yaml"
UpperCAmelCase__ : Tuple = Path("tests/test_configs" )
@classmethod
def __lowercase ( cls ) -> int:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __lowercase ( cls ) -> List[str]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __lowercase ( self ) -> Optional[Any]:
_a : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __lowercase ( self ) -> Any:
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def __lowercase ( self ) -> Any:
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "test-tpu"
UpperCAmelCase__ : Dict = "us-central1-a"
UpperCAmelCase__ : Optional[int] = "ls"
UpperCAmelCase__ : int = ["accelerate", "tpu-config"]
UpperCAmelCase__ : int = "cd /usr/share"
UpperCAmelCase__ : Optional[Any] = "tests/test_samples/test_command_file.sh"
UpperCAmelCase__ : Optional[int] = "Running gcloud compute tpus tpu-vm ssh"
def __lowercase ( self ) -> int:
_a : str = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> List[str]:
_a : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> int:
_a : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_UpperCAmelCase )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> Tuple:
_a : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo \"Hello World\"''',
'''--debug''',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> Optional[int]:
_a : Dict = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> Tuple:
_a : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def __lowercase ( self ) -> str:
_a : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the gate's truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
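
    # Added exhaustive sketch: the tuple-count trick above agrees with
    # Python's boolean `or` on every input pair.
    for a in (0, 1):
        for b in (0, 1):
            assert or_gate(a, b) == int(bool(a) or bool(b))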
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = BlipImageProcessor()
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(_UpperCAmelCase , _UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Optional[int] , **lowerCAmelCase__ : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).tokenizer
def UpperCamelCase__ ( self : Dict , **lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Any = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = BlipaProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[str] = image_processor(_UpperCAmelCase , return_tensors="""np""" )
__SCREAMING_SNAKE_CASE : Any = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = BlipaProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Any = """lower newer"""
__SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = """lower newer"""
__SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Tuple = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = BlipaProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = """lower newer"""
__SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A : Any = logging.get_logger(__name__)
logging.set_verbosity_info()
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
lowercase__ , lowercase__ = XLMProphetNetForConditionalGeneration.from_pretrained(
__magic_name__ , output_loading_info=__magic_name__ )
else:
lowercase__ = ProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
lowercase__ , lowercase__ = ProphetNetForConditionalGeneration.from_pretrained(
__magic_name__ , output_loading_info=__magic_name__ )
lowercase__ = ["""key_proj""", """value_proj""", """query_proj"""]
lowercase__ = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
lowercase__ = key.split(""".""" )
if attributes[0] == "lm_head":
lowercase__ = prophet
lowercase__ = prophet_old
else:
lowercase__ = prophet.prophetnet
lowercase__ = prophet_old.model
lowercase__ = False
for attribute in attributes:
if attribute in mapping:
lowercase__ = mapping[attribute]
if not hasattr(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) > 0:
lowercase__ = attribute
elif hasattr(__magic_name__ , __magic_name__ ):
lowercase__ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowercase__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowercase__ = True
break
elif attribute in special_keys and hasattr(__magic_name__ , """in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ = True
break
if attribute.isdigit():
lowercase__ = model[int(__magic_name__ )]
lowercase__ = old_model[int(__magic_name__ )]
else:
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if old_attribute == "":
lowercase__ = old_model
else:
if not hasattr(__magic_name__ , __magic_name__ ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A : str = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
__magic_name__: Dict = "vision-encoder-decoder"
__magic_name__: Tuple = True
def __init__( self : int , **_A : Tuple ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case_ : int = kwargs.pop('encoder' )
snake_case_ : Optional[int] = encoder_config.pop('model_type' )
snake_case_ : str = kwargs.pop('decoder' )
snake_case_ : Tuple = decoder_config.pop('model_type' )
snake_case_ : Optional[Any] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ : List[str] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ : Union[str, Any] = True
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : Dict ) -> PretrainedConfig:
"""simple docstring"""
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
snake_case_ : Any = True
snake_case_ : Any = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : int = self.encoder.to_dict()
snake_case_ : Tuple = self.decoder.to_dict()
snake_case_ : Dict = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
__magic_name__: List[Any] = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
"""simple docstring"""
return 1E-4
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
@property
def UpperCAmelCase_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
snake_case_ : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
snake_case_ : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
snake_case_ : int = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def UpperCAmelCase_ ( self : List[Any] , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
import torch
snake_case_ : Tuple = OrderedDict()
snake_case_ : List[str] = super().generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
snake_case_ ,snake_case_ : Union[str, Any] = dummy_input['input_ids'].shape
snake_case_ : List[Any] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case_ : str = dummy_input.pop('input_ids' )
snake_case_ : List[str] = dummy_input.pop('attention_mask' )
snake_case_ : Optional[Any] = torch.zeros(_UpperCAmelCase )
return common_inputs
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
@property
def UpperCAmelCase_ ( self : Any ) -> None:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Tuple , _A : PretrainedConfig ) -> OnnxConfig:
"""simple docstring"""
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def UpperCAmelCase_ ( self : Union[str, Any] , _A : PretrainedConfig , _A : PretrainedConfig , _A : str = "default" ) -> OnnxConfig:
"""simple docstring"""
snake_case_ : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase , _UpperCAmelCase )
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Build the in-graph tokenizer layer from an existing (slow) GPT-2 tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '''''')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '''''')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='''scaled_linear''',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowercase__ : Dict = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return a set of products, one per unique prime partition of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than the given number of prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
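    # Added check sketch: 10 has exactly five prime partitions
    # (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), and `partition` returns one
    # distinct product per partition.
    assert len(partition(10)) == 5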
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` by the (multiplicity-aware) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F"""{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological order."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
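
# Added check sketch: on the chain 0 -> 1 -> 2 every vertex lies on the
# longest path, so this call prints 3.
longest_distance({0: [1], 1: [2], 2: []})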
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
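    # Added boundary-case sketch: taking all 70 balls guarantees all 7 colours,
    # since math.comb(60, 70) == 0 makes missing_colour vanish.
    assert solution(70) == "7.000000000"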
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'''{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes)), f'''{gathered_obj} != {list(range(state.num_processes))}'''


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure padding is actually exercised.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'''{reduced_tensor} != {truth_tensor}'''


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'''{reduced_tensor} != {truth_tensor}'''


def _mp_fn(index):
    # Entry point for spawned workers (name follows the accelerate test convention).
    main()


def main():
    state = PartialState()
    state.print(f'''State: {state}''')
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__UpperCAmelCase = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = """https://pypi.org/pypi/diffusers/json"""
lowerCAmelCase_ :List[Any] = json.loads(request.urlopen(lowercase__ ).read() )["""releases"""].keys()
return sorted(lowercase__ , key=lambda lowercase__ : version.Version(lowercase__ ) )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
lowerCAmelCase_ :List[str] = Path(lowercase__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _snake_case ( lowercase__ : Union[str, os.PathLike] ) -> List[str]:
'''simple docstring'''
init_hf_modules()
lowerCAmelCase_ :int = Path(lowercase__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
lowerCAmelCase_ :Tuple = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _snake_case ( lowercase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = f.read()
# Imports of the form `import .xxx`
lowerCAmelCase_ :int = re.findall("""^\s*import\s+\.(\S+)\s*$""" , lowercase__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""" , lowercase__ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowercase__ ) )
def _snake_case ( lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = False
lowerCAmelCase_ :List[str] = [module_file]
lowerCAmelCase_ :Any = []
# Let's recurse through all relative imports
while not no_change:
lowerCAmelCase_ :Optional[int] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowercase__ ) )
lowerCAmelCase_ :int = Path(lowercase__ ).parent
lowerCAmelCase_ :Tuple = [str(module_path / m ) for m in new_imports]
lowerCAmelCase_ :Tuple = [f for f in new_import_files if f not in all_relative_imports]
lowerCAmelCase_ :str = [f"""{f}.py""" for f in new_import_files]
lowerCAmelCase_ :Union[str, Any] = len(lowercase__ ) == 0
all_relative_imports.extend(lowercase__ )
return all_relative_imports
def _snake_case ( lowercase__ : int ) -> Dict:
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = f.read()
# Imports of the form `import xxx`
lowerCAmelCase_ :Tuple = re.findall("""^\s*import\s+(\S+)\s*$""" , lowercase__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""" , lowercase__ , flags=re.MULTILINE )
# Only keep the top-level module
lowerCAmelCase_ :Union[str, Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
lowerCAmelCase_ :Union[str, Any] = list(set(lowercase__ ) )
lowerCAmelCase_ :Optional[int] = []
for imp in imports:
try:
importlib.import_module(lowercase__ )
except ImportError:
missing_packages.append(lowercase__ )
if len(lowercase__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f"""{", ".join(lowercase__ )}. Run `pip install {" ".join(lowercase__ )}`""" )
return get_relative_imports(lowercase__ )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Tuple ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = module_path.replace(os.path.sep , """.""" )
lowerCAmelCase_ :List[str] = importlib.import_module(lowercase__ )
if class_name is None:
return find_pipeline_class(lowercase__ )
return getattr(lowercase__ , lowercase__ )
def _snake_case ( lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
lowerCAmelCase_ :Optional[int] = dict(inspect.getmembers(lowercase__ , inspect.isclass ) )
lowerCAmelCase_ :str = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowercase__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
lowerCAmelCase_ :Dict = cls
return pipeline_class
def _snake_case ( lowercase__ : Union[str, os.PathLike] , lowercase__ : str , lowercase__ : Optional[Union[str, os.PathLike]] = None , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : Optional[Dict[str, str]] = None , lowercase__ : Optional[Union[bool, str]] = None , lowercase__ : Optional[str] = None , lowercase__ : bool = False , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(lowercase__ )
lowerCAmelCase_ :List[str] = os.path.join(lowercase__ , lowercase__ )
if os.path.isfile(lowercase__ ):
lowerCAmelCase_ :List[Any] = module_file_or_url
lowerCAmelCase_ :Union[str, Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
lowerCAmelCase_ :Tuple = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase_ :Any = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase_ :Dict = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
lowerCAmelCase_ :List[Any] = f"""v{revision}"""
elif revision == "main":
lowerCAmelCase_ :Tuple = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
lowerCAmelCase_ :Any = COMMUNITY_PIPELINES_URL.format(revision=lowercase__ , pipeline=lowercase__ )
try:
lowerCAmelCase_ :Any = cached_download(
lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , proxies=lowercase__ , resume_download=lowercase__ , local_files_only=lowercase__ , use_auth_token=lowercase__ , )
lowerCAmelCase_ :Any = """git"""
lowerCAmelCase_ :str = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase_ :Optional[Any] = hf_hub_download(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , proxies=lowercase__ , resume_download=lowercase__ , local_files_only=lowercase__ , use_auth_token=lowercase__ , )
lowerCAmelCase_ :Any = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
lowerCAmelCase_ :str = check_imports(lowercase__ )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase_ :Tuple = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowercase__ )
lowerCAmelCase_ :Optional[Any] = Path(lowercase__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowercase__ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase_ :Tuple = f"""{module_needed}.py"""
shutil.copy(os.path.join(lowercase__ , lowercase__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :Union[str, Any] = use_auth_token
elif use_auth_token is True:
lowerCAmelCase_ :Any = HfFolder.get_token()
else:
lowerCAmelCase_ :List[str] = None
lowerCAmelCase_ :Dict = model_info(lowercase__ , revision=lowercase__ , token=lowercase__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase_ :Optional[int] = submodule_path / commit_hash
lowerCAmelCase_ :Any = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowercase__ )
if not (submodule_path / module_file).exists():
shutil.copy(lowercase__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowercase__ , f"""{module_needed}.py""" , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
return os.path.join(lowercase__ , lowercase__ )
def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    """Extract a class from a module file cached from the Hub or a local folder."""
    final_module = get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    return get_class_in_module(class_name, final_module.replace(".py", ""))
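# A hedged usage sketch for the loader above. The repo id, file name and
# class name below are illustrative placeholders only, not values taken
# from this file:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/custom-pipeline",  # hypothetical Hub repo
#       "pipeline.py",                # module file stored in that repo
#       class_name="MyPipeline",      # hypothetical class defined in pipeline.py
#   )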
| 84
|
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
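# Worked example for the converter above: "AB" is read right to left, so "B"
# contributes (ord("B") - 64) * 26**0 = 2 and "A" contributes
# (ord("A") - 64) * 26**1 = 26, for a total of 28.
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("A") == 1
assert excel_title_to_column("Z") == 26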
| 305
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the user profile dict out of an embedded <script> JSON blob."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and parse the embedded JSON payload."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Optional[int] = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 270
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    """First method: linear regression via the normal equation beta = (X^T X)^-1 X^T y."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX with the match count as an exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: RBF-kernel support vector regression."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote: a forecast is "safe" if it is below and within 0.1 of the actual value."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data (tst_user holds a single value)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
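# A small self-contained check of the normal-equation step used in
# linear_regression_prediction above: beta = (X^T X)^-1 X^T y recovers the
# coefficients of y = 1 + 2 * t exactly on noise-free data.
demo_x = np.array([[1.0, t] for t in range(5)])
demo_y = np.array([1.0 + 2.0 * t for t in range(5)])
demo_beta = np.dot(np.dot(np.linalg.inv(np.dot(demo_x.T, demo_x)), demo_x.T), demo_y)
assert np.allclose(demo_beta, [1.0, 2.0])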
| 305
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class __lowercase (UpperCAmelCase__ ):
_UpperCamelCase = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_UpperCamelCase = Features({"""text""": Value("""string""" )} )
_UpperCamelCase = Features({"""summary""": Value("""string""" )} )
_UpperCamelCase = """text"""
_UpperCamelCase = """summary"""
@property
def UpperCamelCase__ ( self ) ->Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
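# A hedged usage sketch for the task template above. The class and keyword
# names below follow the original `datasets` Summarization task template;
# this obfuscated copy renames the class, so treat them as assumptions:
#
#   task = Summarization(text_column="article", summary_column="highlights")
#   task.column_mapping  # {"article": "text", "highlights": "summary"}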
| 275
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 305
| 0
|
"""simple docstring"""
def actual_power(a: int, b: int) -> int:
    """Fast exponentiation: computes a**b for b >= 0 in O(log b) multiplications."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Like actual_power, but maps a negative exponent to the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
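# Worked example for the recursion above: the exponent is halved at each
# step, so actual_power needs only O(log b) multiplications, and power()
# turns a negative exponent into the reciprocal: (-2) ** -3 == -0.125.
assert power(2, 10) == 1024
assert power(-2, -3) == -0.125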
| 335
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
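# With the lazy-module pattern above, the heavy torch/vision submodules are
# only imported when one of their attributes is first accessed, e.g.:
#
#   from transformers import DPTImageProcessor, DPTForDepthEstimation
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")  # public DPT checkpoint
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")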
| 305
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
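# A hypothetical invocation of the conversion script above; every path and
# the base model name are placeholders, not values shipped with this file:
#
#   python convert_unispeech_sat.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model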
| 72
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
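# Worked example for median_of_two_arrays above: merging [1.0, 3.0] and
# [2.0, 4.0] gives [1.0, 2.0, 3.0, 4.0]; the length is even, so the median
# is the mean of the two middle values, (2.0 + 3.0) / 2 = 2.5.
assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0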
| 305
| 0
|
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Tuple:
_a : Any = 0
_a : List[Any] = 0
_a : str = {}
def __lowercase ( self , _a ) -> Optional[int]:
if vertex not in self.adjacency:
_a : Optional[int] = {}
self.num_vertices += 1
def __lowercase ( self , _a , _a , _a ) -> Tuple:
self.add_vertex(_UpperCAmelCase )
self.add_vertex(_UpperCAmelCase )
if head == tail:
return
_a : List[str] = weight
_a : List[Any] = weight
def __lowercase ( self ) -> Optional[int]:
_a : Tuple = self.get_edges()
for edge in edges:
_a , _a , _a : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(_UpperCAmelCase ) ):
_a : Union[str, Any] = list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_UpperCAmelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a : List[Any] = edges[i][2] + 1
for edge in edges:
_a , _a , _a : List[str] = edge
_a : str = weight
_a : Dict = weight
def __str__( self ) -> Union[str, Any]:
_a : List[Any] = ''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a : str = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('''\n''' )
def __lowercase ( self ) -> str:
_a : Tuple = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowercase ( self ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def __lowercase ( _a=None , _a=None ) -> Union[str, Any]:
_a : Union[str, Any] = Graph()
if vertices is None:
_a : Optional[Any] = []
if edges is None:
_a : int = []
for vertex in vertices:
g.add_vertex(_UpperCAmelCase )
for edge in edges:
g.add_edge(*_UpperCAmelCase )
return g
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> str:
_a : Dict = {}
_a : str = {}
def __len__( self ) -> Dict:
return len(self.parent )
def __lowercase ( self , _a ) -> Any:
if item in self.parent:
return self.find(_UpperCAmelCase )
_a : Union[str, Any] = item
_a : Optional[int] = 0
return item
def __lowercase ( self , _a ) -> Any:
if item not in self.parent:
return self.make_set(_UpperCAmelCase )
if item != self.parent[item]:
_a : Optional[Any] = self.find(self.parent[item] )
return self.parent[item]
def __lowercase ( self , _a , _a ) -> Optional[int]:
_a : int = self.find(_UpperCAmelCase )
_a : Optional[int] = self.find(_UpperCAmelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a : Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a : Optional[Any] = roota
return roota
return None
@staticmethod
def __lowercase ( _a ) -> Optional[int]:
_a : List[str] = graph.num_vertices
_a : int = Graph.UnionFind()
_a : int = []
while num_components > 1:
_a : Dict = {}
for vertex in graph.get_vertices():
_a : Optional[Any] = -1
_a : List[str] = graph.get_edges()
for edge in edges:
_a , _a , _a : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a : Tuple = edge
_a : Any = union_find.find(_UpperCAmelCase )
_a : Union[str, Any] = union_find.find(_UpperCAmelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a : Any = cheap_edge[vertex]
if union_find.find(_UpperCAmelCase ) != union_find.find(_UpperCAmelCase ):
union_find.union(_UpperCAmelCase , _UpperCAmelCase )
mst_edges.append(cheap_edge[vertex] )
_a : Tuple = num_components - 1
_a : Dict = Graph.build(edges=_UpperCAmelCase )
return mst
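# A small self-contained cross-check of the minimum spanning tree that the
# Borůvka-style routine above computes, done here with an independent
# Kruskal-style union-find scan over sorted edges: for this graph the MST
# picks edges a-b (1), b-c (2) and c-d (3), total weight 6.
demo_edges = [("a", "b", 1), ("b", "c", 2), ("a", "c", 4), ("c", "d", 3)]
demo_parent = {v: v for v in "abcd"}


def _find(v):
    while demo_parent[v] != v:
        v = demo_parent[v]
    return v


demo_total = 0
for u, v, w in sorted(demo_edges, key=lambda e: e[2]):
    root_u, root_v = _find(u), _find(v)
    if root_u != root_v:
        demo_parent[root_u] = root_v
        demo_total += w
assert demo_total == 6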
| 235
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by exit time using a depth-first search (first Kosaraju pass)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect one strongly connected component on the reversed graph (second pass)."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS for exit order, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
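# Applying the routine above to the two module-level test graphs: in the
# first graph 0 -> 2 -> 1 -> 0 forms the only cycle, so the strongly
# connected components are {0, 1, 2}, {3} and {4}; in the second graph both
# {0, 1, 2} and {3, 4, 5} are cycles and hence components.
assert sorted(sorted(c) for c in strongly_connected_components(test_graph_1)) == [[0, 1, 2], [3], [4]]
assert sorted(sorted(c) for c in strongly_connected_components(test_graph_2)) == [[0, 1, 2], [3, 4, 5]]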
| 305
| 0
|
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Search for x in sorted arr in O(sqrt(n)) by jumping block-by-block."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 112
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = StableDiffusionDiffEditPipeline
A__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ = frozenset([] )
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
lowercase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
lowercase__ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_zero=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase__ = CLIPTextModel(_UpperCAmelCase )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=0 ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor((1, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe(**_UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__ = self.pipeline_class.from_pretrained(_UpperCAmelCase )
pipe_loaded.to(_UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=_UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_UpperCAmelCase , _UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe_loaded(**_UpperCAmelCase )[0]
lowercase__ = np.abs(output - output_loaded ).max()
self.assertLess(_UpperCAmelCase , 1E-4 )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_mask_inputs(_UpperCAmelCase )
lowercase__ = pipe.generate_mask(**_UpperCAmelCase )
lowercase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ = np.array([0] * 9 )
lowercase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase__ = DPMSolverMultistepScheduler(**_UpperCAmelCase )
lowercase__ = DPMSolverMultistepInverseScheduler(**_UpperCAmelCase )
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCamelCase__ (cls : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase__ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase__ = raw_image
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase , num_inference_steps=25 , ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 305
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("""KT""")
_SCREAMING_SNAKE_CASE = TypeVar("""VT""")
class SCREAMING_SNAKE_CASE_ ( Generic[KT, VT] ):
def __init__( self : List[Any] , _A : KT | str = "root" , _A : VT | None = None ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = key
snake_case_ : Union[str, Any] = value
snake_case_ : Tuple = []
def __repr__( self : List[Any] ) -> str:
"""simple docstring"""
return F"""Node({self.key}: {self.value})"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return len(self.forward )
class SCREAMING_SNAKE_CASE_ ( Generic[KT, VT] ):
def __init__( self : Tuple , _A : float = 0.5 , _A : int = 16 ) -> int:
"""simple docstring"""
snake_case_ : Any = Node[KT, VT]()
snake_case_ : List[str] = 0
snake_case_ : Union[str, Any] = p
snake_case_ : Dict = max_level
def __str__( self : Optional[int] ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = list(self )
if len(_UpperCAmelCase ) == 0:
return F"""SkipList(level={self.level})"""
snake_case_ : Optional[int] = max((len(str(_UpperCAmelCase ) ) for item in items) , default=4 )
snake_case_ : List[Any] = max(_UpperCAmelCase , 4 ) + 4
snake_case_ : int = self.head
snake_case_ : List[Any] = []
snake_case_ : Tuple = node.forward.copy()
lines.append(F"""[{node.key}]""".ljust(_UpperCAmelCase , '-' ) + '* ' * len(_UpperCAmelCase ) )
lines.append(' ' * label_size + '| ' * len(_UpperCAmelCase ) )
while len(node.forward ) != 0:
snake_case_ : int = node.forward[0]
lines.append(
F"""[{node.key}]""".ljust(_UpperCAmelCase , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(_UpperCAmelCase ) )
snake_case_ : str = node.forward
lines.append('None'.ljust(_UpperCAmelCase ) + '* ' * len(_UpperCAmelCase ) )
return F"""SkipList(level={self.level})\n""" + "\n".join(_UpperCAmelCase )
def __iter__( self : Any ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : List[str] = node.forward[0]
def UpperCAmelCase_ ( self : Any ) -> int:
"""simple docstring"""
snake_case_ : Any = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase_ ( self : Optional[Any] , _A : Union[str, Any] ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
snake_case_ : List[Any] = []
snake_case_ : Tuple = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : str = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_UpperCAmelCase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase_ ( self : List[str] , _A : KT ) -> Optional[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self._locate_node(_UpperCAmelCase )
if node is not None:
for i, update_node in enumerate(_UpperCAmelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Dict = update_node.forward[:i]
def UpperCAmelCase_ ( self : Optional[Any] , _A : KT , _A : VT ) -> str:
"""simple docstring"""
snake_case_ ,snake_case_ : int = self._locate_node(_UpperCAmelCase )
if node is not None:
snake_case_ : Optional[Any] = value
else:
snake_case_ : Tuple = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _UpperCAmelCase ):
update_vector.append(self.head )
snake_case_ : Tuple = level
snake_case_ : Optional[Any] = Node(_UpperCAmelCase , _UpperCAmelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_UpperCAmelCase )
else:
snake_case_ : Union[str, Any] = new_node
def UpperCAmelCase_ ( self : Any , _A : VT ) -> VT | None:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self._locate_node(_UpperCAmelCase )
if node is not None:
return node.value
return None
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Optional[Any] = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
snake_case_ : List[Any] = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : str = node.forward[0]
snake_case_ : int = node.value
assert len(__a ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : int = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
snake_case_ : Tuple = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Union[str, Any] = node.forward[0]
snake_case_ : Optional[int] = node.value
if len(__a ) != 4:
print()
assert len(__a ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : str = SkipList()
assert skip_list.find('Some key' ) is None
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : List[str] = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Union[str, Any] = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : int = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Any = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 1_42 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(__a ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__a )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def SCREAMING_SNAKE_CASE__ ( ):
def is_sorted(__a ):
return all(next_item >= item for item, next_item in zip(__a , lst[1:] ) )
snake_case_ : Optional[int] = SkipList()
for i in range(10 ):
skip_list.insert(__a , __a )
assert is_sorted(list(__a ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__a ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(__a ) )
def SCREAMING_SNAKE_CASE__ ( ):
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 327
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices (the recursion base case)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix of even size into four equally sized quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication on square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
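# A quick sanity check of the routine above against the closed-form 2x2
# product: Strassen's seven multiplications must reproduce it exactly.
assert strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]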
| 305
| 0
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # (x, y) pixel coordinates for every pixel of the image, in row-major order
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates into [-1, 1], then scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required field; carried over unchanged
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
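
# Illustrative usage (added; not part of the original module). `create_pan_cameras`
# builds 20 poses circling the origin; `camera_rays` then yields one (origin,
# direction) pair per pixel, with unit-length direction vectors.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]
    origins, directions = rays[..., 0, :], rays[..., 1, :]
    assert torch.allclose(directions.norm(dim=-1), torch.ones_like(directions[..., 0]), atol=1e-5)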
| 306
| 1
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down to restore the min-heap property.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Update function, used when the value of a node in the min-heap decreases.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
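
# Worked example (added for illustration): for the weighted triangle
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), Prim's algorithm starting at vertex 0
# keeps the two cheapest edges that connect all vertices without a cycle:
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     prisms_algorithm(example)   # -> [(0, 1), (1, 2)]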
| 306
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
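
# Run note (added for illustration): inside a transformers checkout these tests are
# usually run with pytest; the @slow-marked ones additionally require RUN_SLOW=1.
#
#     RUN_SLOW=1 pytest tests/models/distilbert/test_modeling_flax_distilbert.py
#
# The file path is the conventional location and may differ in other layouts.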
| 306
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
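
# Migration sketch (added for illustration): since the deprecated class is only a
# thin alias, callers can switch to the image processor directly. The checkpoint
# name below is just an example of the usual `from_pretrained` pattern.
#
#     from transformers import MobileViTImageProcessor
#
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#     inputs = image_processor(images=image, return_tensors="pt")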
| 306
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for SHA-1 hashing of a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message so its bit length is congruent to 448 mod 512, then
        # append the original length as a 64-bit big-endian integer.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded data into 64-byte (512-bit) blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 16-word block into the 80-word message schedule.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
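
# Usage sketch (added for illustration), assuming this file is saved as sha1.py:
#
#     $ python sha1.py --string "abc"
#     a9993e364706816aba3e25717850c26c9cd0d89d
#
# The output above is the standard SHA-1 test vector for "abc".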
| 306
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_SCREAMING_SNAKE_CASE = {"""input_ids""": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE,  # the raw dict from the fmt-off block above
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
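
# Usage sketch (added for illustration): outside the test harness the same round
# trip looks like this; the expected ids are the ones asserted in
# test_tokenization_base_easy_symbols above.
#
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     tok.encode("Hello World!")                                     # [0, 35378, 6661, 38, 2]
#     tok.decode([0, 35378, 6661, 38, 2], skip_special_tokens=True)  # "Hello World!"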
| 306
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
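
# Usage sketch (added for illustration): in practice the tokenizer is loaded from
# one of the checkpoints in PRETRAINED_VOCAB_FILES_MAP above, e.g.
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     tokenizer("Hello world")["input_ids"]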
| 306
| 1
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
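
# Usage sketch (added for illustration); the paths are placeholders, and the
# script name assumes the conventional file name in transformers:
#
#     python convert_bloom_original_checkpoint_to_pytorch.py \
#         --bloom_checkpoint_path /path/to/megatron_checkpoint \
#         --pytorch_dump_folder_path /path/to/output \
#         --shard_model \
#         --pretraining_tp 4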
| 306
|
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
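
# Run note (added for illustration): in a transformers checkout the slow
# integration test above is enabled with the RUN_SLOW flag, e.g.
#
#     RUN_SLOW=1 pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py
#
# The path is the conventional location and may differ in other layouts.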
| 306
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    # Rename the patch-embedding weights of stage `idx` (original name -> HF name).
    embed = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    # Rename the attention-block weights of stage `idx`, block `cnt` (original name -> HF name).
    attention_weights = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Return the (HF name, original name) pair for the cls token of stage `idx`."""
    token = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def final():
    """Return (HF name, original name) pairs for the final layernorm and classifier head."""
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into the Hugging Face format and save it."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
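
# Example invocation (a sketch: the script file name and local paths below are
# illustrative, not taken from the code above -- adjust to where the original
# checkpoint was downloaded):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384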
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Create a 1-based tensor slice unique to this process."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # pad the main-process tensor with one extra element to exercise padding
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
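
# A minimal sketch of how such a script is usually launched (assuming the
# `accelerate` CLI is installed; the process count and script name are
# illustrative only):
#
#   accelerate launch --num_processes 2 test_ops.py
#
# Each spawned process gets its own PartialState, so `gather`, `broadcast`,
# `reduce`, etc. above are exercised across real process groups.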
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        # For common tests
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=UpperCAmelCase_ , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random 10-element array and a random target value."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) approach: try every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized O(n^2) approach: sort once, then use two pointers per fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both implementations and return their best times."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
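
# A quick sanity check of the two implementations (illustrative values, run by hand):
#
#   >>> triplet_sum1([-2, 0, 1, 1, 2], 0)
#   (-2, 0, 2)
#   >>> triplet_sum2([-2, 0, 1, 1, 2], 0)
#   (-2, 0, 2)
#
# Both agree because triplet_sum1 sorts the matching permutation before returning it.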
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
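
# Importing from this module still works but emits a deprecation warning, e.g. (sketch):
#
#   from diffusers.pipeline_utils import DiffusionPipeline  # warns: use diffusers.pipelines.pipeline_utils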
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
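
# A minimal usage sketch (the model id and image URL are illustrative; the
# "mask-generation" task name is the one this pipeline registers for SAM-style models):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
#   outputs = generator(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
#   )
#   masks = outputs["masks"]    # list of binary masks
#   scores = outputs["scores"]  # per-mask IoU scores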
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render benchmark results JSON as a collapsible markdown report."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
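
# Sketch of the expected input shape (keys below are illustrative): the JSON maps
# each benchmark name to {metric: {"new": float, "old": float, "diff": float}}, e.g.
#
#   {"benchmark_array_xd": {"read_batch": {"new": 0.22, "old": 0.21, "diff": 0.01}}}
#
# which renders one "### Benchmark: ..." section with a small markdown table.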
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the weights of an old-structure ProphetNet checkpoint into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these were bare comparisons before; they must be asserts to actually check anything
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
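
# For reference, the query/key/value split above follows the usual fused-projection
# layout: one in_proj weight of shape (3 * embed_dim, embed_dim) is cut into three
# equal row blocks. A standalone sketch (tensor sizes illustrative):
#
#   import torch
#   embed_dim = 4
#   in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
#   q_w = in_proj_weight[:embed_dim, :]
#   k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
#   v_w = in_proj_weight[2 * embed_dim :, :]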
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3,
        )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
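
# These integration tests only run when slow tests are enabled, e.g. (sketch; the
# test file path below is illustrative):
#
#   RUN_SLOW=1 python -m pytest tests/pipelines/vq_diffusion/test_vq_diffusion.py -k classifier_free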
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return indices [i, j] with nums[i] + nums[j] == target, else []."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
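
# Note: two_pointer assumes `nums` is sorted ascending; for unsorted input, sort it
# first (or use a hash map for O(n)). Illustrative check, run by hand:
#
#   >>> two_pointer([2, 7, 11, 15], 9)
#   [0, 1]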
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
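
# Minimal usage sketch (the values shown are simply the defaults above):
#
#   from transformers import ViTMAEConfig, ViTMAEModel
#
#   config = ViTMAEConfig(mask_ratio=0.75)
#   model = ViTMAEModel(config)  # randomly initialized encoder with this config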
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
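# Worked example (assumed values, for illustration only): with a 224x224 input,
# kernel_size=3 and stride=2, 224 % 2 == 0, so pad_along_height = max(3 - 2, 0) = 1,
# giving pad_top = 0 and pad_bottom = 1 -- the asymmetric padding TensorFlow's
# "SAME" mode uses and that plain nn.Conv2d padding cannot express.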
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ) )

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
| 306
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Return a small sample tree: 1 -> (2 -> (4, 5), 3)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left, right."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left, right, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left, root, right."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal: visit nodes level by level, left to right."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes at the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes at the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction on every level."""
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
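# Expected output for the sample tree built by make_tree() (1 at the root, 2 and 3
# as children, 4 and 5 under 2):
#   In-order Traversal: [4, 2, 5, 1, 3]
#   ZigZag order Traversal: [[1], [3, 2], [4, 5]]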
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 306
|
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
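# Usage (illustrative):
#   >>> merge_sort([0, 5, 3, 2, 2])
#   [0, 2, 2, 3, 5]
#   >>> merge_sort([])
#   []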
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 306
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 306
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to `sequence_length`."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
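# Illustrative behavior (hypothetical inputs): padding [[1, 2], [3]] on the right
# with padding_value=-1 and sequence_length=3 yields [[1, 2, -1], [3, -1, -1]].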
def is_punctuation(char) -> bool:
    """Check whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation. Characters such as "^" and
    # "$" are not in the Unicode "P" category but are treated as punctuation anyway.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
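# Examples (illustrative): is_punctuation("!") -> True, is_punctuation("^") -> True
# (caught by the ASCII range check), is_punctuation("a") -> False.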
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 306
| 1
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix * x = vector by Gaussian elimination with
    partial pivoting and back substitution, returning x as a column vector.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
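# Small check (illustrative): solve([[2, 0], [0, 4]], [[6], [8]]) solves
# 2x = 6, 4y = 8 and should return [[3.0], [2.0]].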
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """
    Fit a polynomial of minimal degree through the points (1, y_0), (2, y_1), ...
    and return it as a callable function.
    """
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from Project Euler 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
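# Sanity check (illustrative): question_function(1) == 1 and
# question_function(2) == 683, matching the closed form u(n) = (n**11 + 1) / (n + 1).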
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Find the sum of the FITs (first incorrect terms) of the optimum polynomials
    fitted to the first 1..order terms generated by `func`.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 306
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Set the weight (and optionally bias) of one PyTorch layer."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for LSH attention (query_key is shared)
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for local attention (separate query, key, value)
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
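# Example invocation (the script name and file paths below are placeholders, not
# taken from this file):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin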
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 306
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 306
| 1
|
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order, by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
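
# A minimal usage sketch (added for illustration, not part of the original
# script): the factors of a composite number multiply back to the input.
if __name__ == "__main__":
    from math import prod

    sample = 360
    factors = prime_factors(sample)
    print(f"{sample} = {' * '.join(map(str, factors))}")  # 360 = 2 * 2 * 2 * 3 * 3 * 5
    assert prod(factors) == sample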
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # Position ids must skip the padding index, so the first usable position is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        # With inputs_embeds there is no padding to detect, so positions are sequential past padding_idx.
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
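
# Illustrative sketch (not from the original test file): ESM derives position
# ids by counting only non-padding tokens, offset past the padding index. The
# toy helper below mirrors that contract, assuming padding_idx=1.
if __name__ == "__main__":
    def demo_position_ids(input_ids: "torch.Tensor", padding_idx: int = 1) -> "torch.Tensor":
        mask = input_ids.ne(padding_idx).int()          # 1 for real tokens, 0 for padding
        return torch.cumsum(mask, dim=1) * mask + padding_idx

    print(demo_position_ids(torch.tensor([[12, 31, 13, 1]])))  # tensor([[2, 3, 4, 1]])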
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
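
# Usage sketch (added for illustration): the only difference between the two
# modes is whether the first word is capitalized.
if __name__ == "__main__":
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString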
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap primality check: trial-divide by small primes, then fall back to Miller-Rabin."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
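
# Illustrative sketch (not part of the original script): two independently
# generated primes combine into an RSA-style modulus; keysize=64 keeps the
# demo fast compared to the 1024-bit default.
if __name__ == "__main__":
    p = generate_large_prime(keysize=64)
    q = generate_large_prime(keysize=64)
    print(f"p = {p}\nq = {q}\nn = p*q = {p * q}")  # n has roughly 128 bits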
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # python one-liner segments; `load` must run before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed: the files are cached, so a flaky network must not matter
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
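
# Minimal sketch of the trick the tests above rely on (illustrative, not part
# of the test file): once socket.socket is monkey-patched to raise, any code
# path that tries to reach the network fails loudly, so cache-only behaviour
# can be asserted.
if __name__ == "__main__":
    import socket

    def offline_socket(*args, **kwargs):
        raise RuntimeError("network disabled for this demo")

    socket.socket = offline_socket
    try:
        socket.create_connection(("huggingface.co", 443))
    except RuntimeError as e:
        print("blocked as expected:", e)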
def get_1s_count(number: int) -> int:
    """Count set bits using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
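
# Quick sanity check (added for illustration): Kernighan's trick agrees with
# Python's string-based popcount for a few values.
if __name__ == "__main__":
    for n in (0, 1, 7, 255, 2**31 - 1):
        assert get_1s_count(n) == bin(n).count("1")
    print("all popcounts agree")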
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
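
# Usage sketch (added for illustration): a pair of negatives flips the
# running min/max so their product can still win.
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6  -> subarray [2, 3]
    print(max_product_subarray([-2, -3, 4]))    # 24 -> subarray [-2, -3, 4]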
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
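
# Illustrative toy version of the lazy-import idea above (not the real
# _LazyModule; kept in comments because it belongs in a separate toy module):
# PEP 562 lets a module define __getattr__, so the heavy submodule import is
# deferred until an attribute is first accessed.
#
#     import importlib
#
#     _LAZY = {"AltCLIPModel": ".modeling_altclip"}  # hypothetical name -> submodule map
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __package__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")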
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
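
# Usage sketch (added for illustration): instantiate the config with defaults
# and tweak one field; `attribute_map` lets the generic transformer attribute
# name resolve to the GPT-style one.
if __name__ == "__main__":
    config = TrajectoryTransformerConfig(n_layer=2)
    assert config.num_hidden_layers == config.n_layer == 2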
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1_024,
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
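
# Usage sketch (illustrative; requires the model files from the Hub): with the
# default non-legacy behaviour the source language code is prepended and eos
# appended, so the first token of an English input is "eng_Latn".
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tokenizer("Hello world", return_tensors="pt")
    print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0])[0])  # eng_Latn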
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
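
# Sketch of what find_executable_batch_size does, written as a toy decorator
# (my simplified version for exposition, not the real accelerate code): retry
# the wrapped function with a halved batch size whenever it hits an OOM error.
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size)
                except RuntimeError as e:  # the real implementation checks for CUDA OOM specifically
                    if "out of memory" not in str(e):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found")

        return wrapper

    return decorator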
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate/evaluate the polynomial through (x_points, y_points) at x0 via Neville's scheme."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
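
# Worked example (added for illustration): five samples of y = x^2 recover
# the exact value at x0 = 2.5; the first element of the result is the
# interpolated value, the second the full Neville table.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 2.5)
    print(value)  # 6.25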
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
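
# Illustrative sweep over all four input combinations (added for clarity):
# the two-bit count key reads "<carry><sum>", so 1 + 1 should report {"10": 1000}.
if __name__ == "__main__":
    for a in (0, 1):
        for b in (0, 1):
            print(f"{a} + {b} -> {half_adder(a, b)}")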
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Remove duplicates from a toctree section and sort entries alphabetically, keeping 'Overview' first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
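
# Toy demonstration of clean_doc_toc (added for illustration): "Overview"
# stays first, the rest is sorted by title, and exact duplicates collapse.
if __name__ == "__main__":
    docs = [
        {"local": "zeta", "title": "Zeta"},
        {"local": "index", "title": "Overview"},
        {"local": "alpha", "title": "Alpha"},
        {"local": "alpha", "title": "Alpha"},
    ]
    print(clean_doc_toc(docs))
    # [{'local': 'index', 'title': 'Overview'}, {'local': 'alpha', 'title': 'Alpha'}, {'local': 'zeta', 'title': 'Zeta'}]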
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable pinhole cameras (one row per camera)."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # Integer (column, row) coordinates for every pixel, shape [height * width, 2].
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        # Map pixel coordinates to [-1, 1], then scale onto the tangent plane of the fov.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for a resized view; the aspect ratio must be preserved."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build 20 cameras orbiting the origin, all looking inward and slightly down."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
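

# Quick usage sketch (assumes only the code in this file): pan 20 cameras around
# the origin at size 64 and inspect the flattened ray bundle, which packs one
# (origin, direction) pair per pixel per camera.
def _camera_demo():
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    # 20 cameras * 64 * 64 pixels = 81920 rays, each a 2 x 3 (origin, direction) pair.
    assert rays.shape == (1, 81920, 2, 3)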
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Score an image against free-form candidate labels, CLIP-style."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
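

# Usage sketch via the high-level factory (network access required; the CLIP
# checkpoint name is a real, commonly used one, but any zero-shot image
# classification model works):
def _zero_shot_demo():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    preds = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of a cat", "a photo of a dog"],
    )
    return preds  # e.g. [{"score": 0.99, "label": "a photo of a cat"}, ...]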
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
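

# Standalone sketch of the slow integration check above, runnable outside the
# unittest harness (downloads the distilbert-base-uncased checkpoint on first use):
def _flax_distilbert_demo():
    model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
    input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    attention_mask = np.ones_like(input_ids)
    hidden_states = model(input_ids, attention_mask=attention_mask)[0]
    assert hidden_states.shape == (1, 11, 768)  # [batch, sequence, hidden]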
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Counts the triangle words in words.txt (expected next to this script):
    a word is a triangle word if the sum of its alphabetical letter values
    (A=1, B=2, ...) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
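

# Worked example of the letter-value rule used in `solution`: "SKY" maps to
# 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular number, so
# "SKY" counts as a triangle word.
assert sum(ord(letter) - 64 for letter in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS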
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Contains the full pipeline for SHA-1 hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad to a multiple of 64 bytes: append the 0x80 marker, zero fill, then
        # the 8-byte big-endian bit length of the original message.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand one 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
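

# Sanity check against the published FIPS 180 test vector: SHA-1(b"abc") must be
# a9993e364706816aba3e25717850c26c9cd0d89d.
assert SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()
assert SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"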
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence of two strings via bottom-up dynamic
    programming and returns (length, subsequence).
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
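

# Worked example: for x="ABC" and y="AC" the DP table peaks at l[3][2] == 2 and
# the backtrack recovers "AC".
assert longest_common_subsequence("ABC", "AC") == (2, "AC")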
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # If the backend pre-tokenizer disagrees on add_prefix_space, rebuild it.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
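

# Usage sketch (real Hub checkpoint name; tokenizer.json is downloaded on first
# call). Round-tripping plain ASCII text through the byte-level BPE is lossless.
def _bloom_tokenizer_demo():
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    ids = tok("Hello world")["input_ids"]
    assert tok.decode(ids) == "Hello world"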
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
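

# Standalone sketch of the slow integration test above, using the public class
# name from `transformers` (slow: downloads the xlarge checkpoint; hidden size
# for deberta-v2-xlarge is 1536):
def _tf_deberta_v2_demo():
    from transformers import TFDebertaV2Model

    model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
    input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
    attention_mask = tf.ones_like(input_ids)
    hidden = model(input_ids, attention_mask=attention_mask)[0]
    assert hidden.shape == (1, 11, 1536)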
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert one cached SquadFeatures object to model tensors.
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
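

# Construction sketch, assuming `./squad` contains the SQuAD v1.1 json files
# (train-v1.1.json / dev-v1.1.json); tokenized features are cached next to the
# data on the first run.
def _squad_dataset_demo():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
    train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
    print(len(train_dataset), sorted(train_dataset[0].keys()))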
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # Each rank produces a distinct slice: rank r holds [r*n + 1, ..., r*n + n].
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure that padding actually happens on the other ranks.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
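

# Launch sketch: these checks are meant to run once per process, e.g.
#   accelerate launch --num_processes 2 this_script.py
# With two processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather returns [1., 2., 3., 4.] everywhere -- exactly what
# test_gather asserts. The pure-Python restatement below mirrors that arithmetic.
def _expected_gather_for_two_processes():
    num_processes = 2
    per_rank = [
        [i + 1.0 + num_processes * rank for i in range(num_processes)]
        for rank in range(num_processes)
    ]
    return [value for rank_values in per_rank for value in rank_values]  # [1.0, 2.0, 3.0, 4.0]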
def twos_complement(number: int) -> str:
    """
    Take in a negative integer and return its two's complement representation
    as a binary string prefixed with "0b".
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
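

# Worked example for number = -5: bin(-5) is "-0b101", so the magnitude needs 3
# bits; abs(-5) - 2**3 = -3 contributes "11"; prepending the sign bit and zero
# padding yields "1011", the 4-bit two's complement of -5 (0b1011 = 11 = 16 - 5).
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"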
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: O(n^3) over all ordered triples.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then close in with two pointers: O(n^2).
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase ):
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , transpose(UpperCAmelCase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , transpose(UpperCAmelCase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ ) , np.asarray(transpose(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCAmelCase_ , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , np.reshape(UpperCAmelCase_ , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , np.reshape(UpperCAmelCase_ , (12, 5) ) ) )
@require_torch
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , reshape(UpperCAmelCase_ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , reshape(UpperCAmelCase_ , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , reshape(UpperCAmelCase_ , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , reshape(UpperCAmelCase_ , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (4, 3) ) , np.asarray(reshape(UpperCAmelCase_ , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase_ , (12, 5) ) , np.asarray(reshape(UpperCAmelCase_ , (12, 5) ) ) ) )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , np.squeeze(UpperCAmelCase_ ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , np.squeeze(UpperCAmelCase_ , axis=2 ) ) )
@require_torch
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , squeeze(UpperCAmelCase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , squeeze(UpperCAmelCase_ , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , squeeze(UpperCAmelCase_ ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , squeeze(UpperCAmelCase_ , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ ) , np.asarray(squeeze(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase_ , axis=2 ) , np.asarray(squeeze(UpperCAmelCase_ , axis=2 ) ) ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , np.expand_dims(UpperCAmelCase_ , axis=1 ) ) )
@require_torch
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , expand_dims(UpperCAmelCase_ , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(UpperCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , expand_dims(UpperCAmelCase_ , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(UpperCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase_ , axis=1 ) , np.asarray(expand_dims(UpperCAmelCase_ , axis=1 ) ) ) )
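

# Quick sketch of the framework-agnostic helpers exercised above: the same
# `transpose`/`flatten_dict` calls accept NumPy arrays (and Torch/TF/JAX
# tensors) and dispatch on the input type.
def _generic_utils_demo():
    x = np.random.randn(3, 4)
    assert transpose(x).shape == (4, 3)
    assert flatten_dict({"a": {"b": 1, "c": 2}}) == {"a.b": 1, "a.c": 2}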
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )["""last_hidden_state"""]
_SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
else:
_SCREAMING_SNAKE_CASE = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
_SCREAMING_SNAKE_CASE = ["""key_proj""", """value_proj""", """query_proj"""]
_SCREAMING_SNAKE_CASE = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE = prophet
_SCREAMING_SNAKE_CASE = prophet_old
else:
_SCREAMING_SNAKE_CASE = prophet.prophetnet
_SCREAMING_SNAKE_CASE = prophet_old.model
_SCREAMING_SNAKE_CASE = False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE = mapping[attribute]
if not hasattr(snake_case__ ,snake_case__ ) and len(snake_case__ ) > 0:
_SCREAMING_SNAKE_CASE = attribute
elif hasattr(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.weight
logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.bias
                logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute in special_keys and hasattr(snake_case__ ,"""in_proj_weight""" ):
_SCREAMING_SNAKE_CASE = old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE = True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE = model[int(snake_case__ )]
_SCREAMING_SNAKE_CASE = old_model[int(snake_case__ )]
else:
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if old_attribute == "":
_SCREAMING_SNAKE_CASE = old_model
else:
if not hasattr(snake_case__ ,snake_case__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
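# CLI sketch (added; the script name is assumed, and the checkpoint name and
# output path are placeholders following the `patrickvonplaten/..._old`
# convention noted in the header comment):
# python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#     --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#     --pytorch_dump_folder_path ./prophetnet-converted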
| 306
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase = logging.getLogger()
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
return args.f
class __UpperCAmelCase (_UpperCAmelCase ):
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(UpperCAmelCase_ , """argv""" , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCAmelCase_ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCAmelCase_ )
| 306
|
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(snake_case__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_SCREAMING_SNAKE_CASE = i + 1
else:
_SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: List[str] , UpperCAmelCase_: WhisperForConditionalGeneration , UpperCAmelCase_: WhisperProcessor , UpperCAmelCase_: AutoencoderKL , UpperCAmelCase_: CLIPTextModel , UpperCAmelCase_: CLIPTokenizer , UpperCAmelCase_: UNetaDConditionModel , UpperCAmelCase_: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_: StableDiffusionSafetyChecker , UpperCAmelCase_: CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=UpperCAmelCase_ , speech_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
_SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_ )
@torch.no_grad()
def __call__( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[Any]=16_000 , UpperCAmelCase_: int = 512 , UpperCAmelCase_: int = 512 , UpperCAmelCase_: int = 50 , UpperCAmelCase_: float = 7.5 , UpperCAmelCase_: Optional[Union[str, List[str]]] = None , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: float = 0.0 , UpperCAmelCase_: Optional[torch.Generator] = None , UpperCAmelCase_: Optional[torch.FloatTensor] = None , UpperCAmelCase_: Optional[str] = "pil" , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_: int = 1 , **UpperCAmelCase_: List[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor(
UpperCAmelCase_ , return_tensors="""pt""" , sampling_rate=UpperCAmelCase_ ).input_features.to(self.device )
_SCREAMING_SNAKE_CASE = self.speech_model.generate(UpperCAmelCase_ , max_length=480_000 )
_SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , normalize=UpperCAmelCase_ )[
0
]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = 1
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCAmelCase_ )}.' )
# get prompt text embeddings
_SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
_SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = text_embeddings.shape
_SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
_SCREAMING_SNAKE_CASE = [""""""] * batch_size
elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ):
raise TypeError(
                F'`negative_prompt` should be the same type as `prompt`, but got {type(UpperCAmelCase_ )} !='
F' {type(UpperCAmelCase_ )}.' )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(UpperCAmelCase_ ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
_SCREAMING_SNAKE_CASE = negative_prompt
_SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
_SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase_ , padding="""max_length""" , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
_SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device="""cpu""" , dtype=UpperCAmelCase_ ).to(
self.device )
else:
_SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_SCREAMING_SNAKE_CASE = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_SCREAMING_SNAKE_CASE = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_SCREAMING_SNAKE_CASE = {}
if accepts_eta:
_SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
_SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = 1 / 0.1_82_15 * latents
_SCREAMING_SNAKE_CASE = self.vae.decode(UpperCAmelCase_ ).sample
_SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
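# Usage sketch (added, kept as comments; every checkpoint and parameter name
# here is an assumption rather than something defined in this file): the class
# wires Whisper transcription into a Stable Diffusion text-to-image loop.
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion"
# )  # hypothetical wiring; the speech model, processor, etc. must also be supplied
# image = pipe(raw_audio, sampling_rate=16_000).images[0]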
| 306
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''MobileNetV1Config'''
# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1_024, 7, 7]
# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = model.mobilenet_va
else:
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = """MobilenetV1/Conv2d_0/"""
_SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = i * 2
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_SCREAMING_SNAKE_CASE = model.classifier.weight
_SCREAMING_SNAKE_CASE = model.classifier.bias
return tf_to_pt_map
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_SCREAMING_SNAKE_CASE = tf.train.list_variables(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
_SCREAMING_SNAKE_CASE = tf.train.load_variable(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = array
# Build TF to PyTorch weights loading map
_SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(snake_case__ ,snake_case__ ,snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
_SCREAMING_SNAKE_CASE = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_SCREAMING_SNAKE_CASE = array.squeeze().transpose()
else:
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
_SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ ,snake_case__ )
tf_weights.pop(name + """/RMSProp""" ,snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" ,snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = features.shape[-2:]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.stride
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.kernel_size
if in_height % stride_height == 0:
_SCREAMING_SNAKE_CASE = max(kernel_height - stride_height ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
_SCREAMING_SNAKE_CASE = max(kernel_width - stride_width ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_width - (in_width % stride_width) ,0 )
_SCREAMING_SNAKE_CASE = pad_along_width // 2
_SCREAMING_SNAKE_CASE = pad_along_width - pad_left
_SCREAMING_SNAKE_CASE = pad_along_height // 2
_SCREAMING_SNAKE_CASE = pad_along_height - pad_top
_SCREAMING_SNAKE_CASE = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case__ ,snake_case__ ,"""constant""" ,0.0 )
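# Worked example (added): with in_height = 7, stride_height = 2, kernel_height = 3,
# the `in_height % stride_height != 0` branch gives pad_along_height = max(3 - (7 % 2), 0) = 2,
# split as pad_top = 1 and pad_bottom = 1. This reproduces TensorFlow "SAME"
# padding, which PyTorch's symmetric `padding=` argument cannot always express.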
class __UpperCAmelCase (nn.Module ):
def __init__( self: Optional[Any] , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[bool] = True , UpperCAmelCase_: Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
_SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_SCREAMING_SNAKE_CASE = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , )
if use_normalization:
_SCREAMING_SNAKE_CASE = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
else:
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_SCREAMING_SNAKE_CASE = apply_tf_padding(UpperCAmelCase_ , self.convolution )
_SCREAMING_SNAKE_CASE = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
_SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
_SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase_ )
return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
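# Usage sketch (added, kept as comments; the checkpoint name is taken from the
# docstring constants above, the image path is a placeholder):
# from PIL import Image
# from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
# processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"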
| 306
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = PriorTransformer
__snake_case : Optional[int] = "hidden_states"
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
torch.manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return (4, 8)
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return (4, 8)
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
_SCREAMING_SNAKE_CASE = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
_SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , """set_default_attn_processor""" ):
model.set_default_attn_processor()
_SCREAMING_SNAKE_CASE = self.get_dummy_seed_input()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = output[0, :5].flatten().cpu()
print(UpperCAmelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_SCREAMING_SNAKE_CASE = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: str , UpperCAmelCase_: List[str]=1 , UpperCAmelCase_: int=768 , UpperCAmelCase_: List[Any]=77 , UpperCAmelCase_: str=0 ):
'''simple docstring'''
torch.manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = embedding_dim
_SCREAMING_SNAKE_CASE = num_embeddings
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Dict , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_seed_input(seed=UpperCAmelCase_ )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )[0]
assert list(sample.shape ) == [1, 768]
_SCREAMING_SNAKE_CASE = sample[0, :8].flatten().cpu()
print(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
| 306
|
def __lowerCamelCase ( snake_case__ ) -> list:
"""simple docstring"""
def merge(snake_case__ ,snake_case__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
_SCREAMING_SNAKE_CASE = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
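# Example (added): merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; the
# recursion splits at `mid`, and `merge` interleaves the two sorted halves by
# repeatedly popping whichever head element is smaller.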
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 306
| 1
|
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = str(snake_case__ )
return n == n[::-1]
def __lowerCamelCase ( snake_case__ = 1_00_00_00 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
for i in range(1 ,snake_case__ ):
if is_palindrome(snake_case__ ) and is_palindrome(bin(snake_case__ ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
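# Worked example (added): 585 is counted because it is palindromic in both
# bases: str(585) reads the same reversed, and bin(585).split("b")[1] ==
# "1001001001", also a palindrome. This is Project Euler problem 36 for the
# default limit of 1_000_000.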
| 306
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length, 2) ,snake_case__ )
else:
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length) ,snake_case__ )
for i, tensor in enumerate(snake_case__ ):
if padding_side == "right":
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
return out_tensor.tolist()
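# Intended-behavior example (added, assuming the usual collator semantics for
# this helper): padding_tensor([[1, 2], [3]], -1, "right", 3) writes each row
# into a buffer pre-filled with -1, yielding [[1, 2, -1], [3, -1, -1]].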
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ )
if cat.startswith("""P""" ):
return True
return False
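# Examples (added): the helper above returns True for "!" (code point 33 falls
# in the 33-47 ASCII band) and False for "a"; characters whose Unicode category
# starts with "P", such as the guillemet "«", are also treated as punctuation.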
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : PreTrainedTokenizerBase
__snake_case : Union[bool, str, PaddingStrategy] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : int = -100
__snake_case : str = "pt"
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
import torch
_SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels"""
_SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1]
_SCREAMING_SNAKE_CASE = self.tokenizer.padding_side
if padding_side == "right":
_SCREAMING_SNAKE_CASE = [
list(UpperCAmelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) for label in labels
]
else:
_SCREAMING_SNAKE_CASE = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) + list(UpperCAmelCase_ ) for label in labels
]
_SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , -1 , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , (-1, -1) , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {k: torch.tensor(UpperCAmelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 306
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Union[str, Any] = CustomTokenizer
pass
| 306
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
# intermediate weighs
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Set torch weights for the whole Reformer model."""
    torch_model_reformer = torch_model.reformer
    # word embeddings
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    out_layer_norm_weight = np.asarray(weights[7][0])
    out_layer_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(out_layer_norm_weight),
        torch.tensor(out_layer_norm_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a trax Reformer checkpoint to a PyTorch checkpoint."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
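# A minimal, self-contained sketch of the reshaping the converter above relies on:
# trax stores attention projections per head, while PyTorch's nn.Linear expects one
# 2-D weight matrix. The dimensions below are illustrative assumptions, not values
# read from a real checkpoint.
import torch

num_heads, head_dim, hidden_size = 2, 3, 8
trax_query_key = torch.randn(num_heads, hidden_size, head_dim)  # per-head trax layout
# transpose(1, 2) -> (num_heads, head_dim, hidden_size); view(-1, hidden_size) stacks the heads
linear_weight = trax_query_key.transpose(1, 2).contiguous().view(-1, hidden_size)
assert linear_weight.shape == (num_heads * head_dim, hidden_size)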
| 306
| 1
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
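# The same deprecation-shim pattern, an empty subclass whose only job is to emit a
# FutureWarning before delegating to the new class, is easy to reuse. A minimal
# generic sketch; the class names here are hypothetical, not part of transformers.
import warnings


class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)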
| 306
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = TextToVideoSDPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """np"""
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
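# The tests above pin down randomness with an explicitly seeded torch.Generator, which
# is what makes slice comparisons against hard-coded values meaningful. A minimal
# sketch of that pattern (the shapes and the sampling function are illustrative, not
# the pipeline's real internals):
import torch


def sample_latents(generator: torch.Generator) -> torch.Tensor:
    # stand-in for a diffusion step that draws initial latents
    return torch.randn(1, 4, 8, 8, generator=generator)


first = sample_latents(torch.Generator(device="cpu").manual_seed(0))
second = sample_latents(torch.Generator(device="cpu").manual_seed(0))
assert torch.equal(first, second)  # same seed and device give identical draws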
| 306
| 1
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int=13 , UpperCAmelCase_: Optional[int]=7 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=33 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: List[Any]=5 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Optional[Any]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
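# create_position_ids_from_input_ids, exercised in the embedding tests above, maps
# non-padding tokens to positions padding_idx + 1, padding_idx + 2, ... while padding
# positions stay at padding_idx. A minimal sketch of that logic, assuming no
# past-key-values offset:
import torch


def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the role of the padding index here
print(position_ids_from_input_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])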
| 306
| 1
|
def merge_sort(collection: list) -> list:
    """Repeatedly pull the min and max out of the collection, building a sorted
    prefix and suffix; any middle element left over is already in place."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
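# Usage sketch for the sorter above (assumes merge_sort from this file is in scope).
# Note it mutates its argument in place via list.remove, and each pass costs O(n) for
# min/max plus O(n) for removal, so the whole routine is O(n^2).
data = [5, 1, 4, 2, 3]
assert merge_sort(list(data)) == sorted(data)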
| 306
|
import random


def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap pre-check against small primes before falling back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
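# Quick sanity check for the primality helpers above (assumes the functions from this
# file are in scope). pow(a, s, num) does modular exponentiation, which keeps the
# Miller-Rabin witnesses cheap even for 1024-bit candidates.
assert is_prime_low_num(97) and not is_prime_low_num(1_001)  # 1001 = 7 * 11 * 13
small_prime = generate_large_prime(keysize=16)  # 16-bit key for a fast demo
print(small_prime, is_prime_low_num(small_prime))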
| 306
| 1
|
def combination_util(arr, n, r, index, data, i):
    """Recursively fill data[0..r-1] with all combinations of size r from arr[0..n-1]."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combinations using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
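# The recursion above is the classic include/exclude enumeration; the standard
# library reaches the same combinations directly. A quick cross-check:
from itertools import combinations

assert list(combinations([10, 20, 30], 2)) == [(10, 20), (10, 30), (20, 30)]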
| 306
|
def get_set_bits_count(number: int) -> int:
    """Count set bits using Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so this way we arrive at
        # the next set bit (next 1) instead of looping through each bit; the loop
        # won't run 32 times, it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
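# Cross-check for the bit counter above against a built-in popcount (assumes
# get_set_bits_count from this file is in scope; int.bit_count needs Python >= 3.10,
# so bin(n).count("1") is used instead).
for n in (0, 1, 2**10, 0b1011_0110):
    assert get_set_bits_count(n) == bin(n).count("1")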
| 306
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''MobileNetV1Config'''
# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1_024, 7, 7]
# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class __UpperCAmelCase (nn.Module ):
def __init__( self: Optional[Any] , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[bool] = True , UpperCAmelCase_: Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
_SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_SCREAMING_SNAKE_CASE = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , )
if use_normalization:
_SCREAMING_SNAKE_CASE = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
else:
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_SCREAMING_SNAKE_CASE = apply_tf_padding(UpperCAmelCase_ , self.convolution )
_SCREAMING_SNAKE_CASE = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
_SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
_SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase_ )
return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
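# apply_tf_padding above reproduces TensorFlow's "SAME" padding, which pads
# asymmetrically (the extra pixel goes on the bottom/right when the total is odd),
# unlike PyTorch's symmetric padding argument. A standalone check of the arithmetic
# for one spatial dimension:
def same_pad(in_size: int, stride: int, kernel: int) -> tuple:
    if in_size % stride == 0:
        pad_along = max(kernel - stride, 0)
    else:
        pad_along = max(kernel - (in_size % stride), 0)
    return (pad_along // 2, pad_along - pad_along // 2)  # (before, after)


assert same_pad(7, 2, 3) == (1, 1)  # odd input size, stride 2: symmetric here
assert same_pad(8, 2, 3) == (0, 1)  # even input size: the extra pixel goes after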
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
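# A minimal sketch of the lazy-import pattern used above: attribute access on the
# module triggers the real import, so importing the package stays cheap until a
# symbol is actually needed. This is a simplified stand-in for transformers'
# _LazyModule, not its real implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)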
| 306
| 1
|
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
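# Usage sketch for the two helpers above: split 12 attention blocks evenly across 4
# devices, then validate the resulting map (assumes the functions above are in scope).
device_map = get_device_map(n_layers=12, devices=[0, 1, 2, 3])
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # raises ValueError on duplicates/gaps/extras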
| 306
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
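# A hedged usage sketch for the tokenizer above. The public class name
# `NllbTokenizerFast` and the checkpoint id are assumptions about the surrounding
# library, not facts from this file; running it downloads the checkpoint.
if __name__ == "__main__":
    from transformers import NllbTokenizerFast
    tok = NllbTokenizerFast.from_pretrained("""facebook/nllb-200-distilled-600M""" , src_lang="""eng_Latn""" )
    enc = tok("""Hello world""" , return_tensors="""pt""" )
    # With legacy_behaviour=False the source language code is prepended and EOS appended.
    print(tok.convert_ids_to_tokens(enc["""input_ids"""][0] ) )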
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline(Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("""uint8""" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
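# A hedged usage sketch for the pipeline defined above; the `pipeline` factory is
# the usual entry point and """Intel/dpt-large""" is an illustrative depth-estimation
# checkpoint (network access required).
if __name__ == "__main__":
    from transformers import pipeline
    depth_estimator = pipeline("""depth-estimation""" , model="""Intel/dpt-large""" )
    out = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
    out["""depth"""].save("""depth.png""" )  # PIL image; out["predicted_depth"] is the raw tensor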
def neville_interpolate( x_points: list ,y_points: list ,x0: int ) -> list:
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
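# Quick self-check of neville_interpolate on points of the line y = 2x: the
# interpolated value at x0 = 5 must be exactly 10 for any polynomial interpolation.
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4] ,[2, 4, 6, 8] ,5 )
    assert abs(value - 10 ) < 1E-9
    print(value )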
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer( inputs ,context ,d_inputs ,h_output0 ,h_output1 ,d_output0 ,d_output1 ,stream ):
    """simple docstring"""
    input_ids = np.asarray(inputs["""input_ids"""] ,dtype=np.int64 )
    attention_mask = np.asarray(inputs["""attention_mask"""] ,dtype=np.int64 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] ,dtype=np.int64 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] ,input_ids.ravel() ,stream )
    cuda.memcpy_htod_async(d_inputs[1] ,attention_mask.ravel() ,stream )
    cuda.memcpy_htod_async(d_inputs[2] ,token_type_ids.ravel() ,stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] ,stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 ,d_output0 ,stream )
    cuda.memcpy_dtoh_async(h_output1 ,d_output1 ,stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names
question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
    max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    """simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,examples[context_column_name if pad_on_right else question_column_name] ,truncation="""only_second""" if pad_on_right else """only_first""" ,max_length=max_seq_length ,stride=args.doc_stride ,return_overflowing_tokens=True ,return_offsets_mapping=True ,padding="""max_length""" ,)
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
)
def post_processing_function( examples ,features ,predictions ,stage="eval" ):
    """simple docstring"""
    predictions = postprocess_qa_predictions(
        examples=examples ,features=features ,predictions=predictions ,version_2_with_negative=args.version_2_with_negative ,n_best_size=args.n_best_size ,max_answer_length=args.max_answer_length ,null_score_diff_threshold=args.null_score_diff_threshold ,output_dir=args.output_dir ,prefix=stage ,)
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions ,label_ids=references )
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
    '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
    '''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
    '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wav2vec2'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_wav2vec2'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_wav2vec2'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
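# Downstream effect of the lazy structure above (a sketch, assuming this file is a
# model package __init__.py in transformers): symbols listed in _import_structure
# resolve on first attribute access, so framework code is only imported when touched.
#
#   from transformers import Wav2Vec2Config          # no torch import yet
#   config = Wav2Vec2Config()                        # still framework-free
#   from transformers import Wav2Vec2ForCTC          # now the torch branch above loads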
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """embed_dim""" ) )
        self.parent.assertTrue(hasattr(config , """num_heads""" ) )
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    @unittest.skip(reason="""Cvt does not output attentions""" )
    def test_attention_outputs( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""Cvt does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
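# Hedged invocation sketch, assuming the usual transformers test layout for this file:
#
#   pytest tests/models/cvt/test_modeling_cvt.py -v             # fast unit checks
#   RUN_SLOW=1 pytest tests/models/cvt/test_modeling_cvt.py -v  # also runs the @slow integration test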
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self ):
        '''simple docstring'''
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self ):
        '''simple docstring'''
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays( self ):
        '''simple docstring'''
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays( self , coords: torch.Tensor ):
        '''simple docstring'''
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image( self , width: int , height: int ) -> "DifferentiableProjectiveCamera":
        '''simple docstring'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def create_pan_cameras( size: int ) -> DifferentiableProjectiveCamera:
    """simple docstring"""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z ,x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(xs ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(ys ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(zs ,axis=0 ) ).float() ,width=size ,height=size ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(xs )) ,)
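# A small runnable check of the camera above: create_pan_cameras(8) yields 20 poses
# around the origin, and camera_rays packs an (origin, direction) pair per pixel.
if __name__ == "__main__":
    camera = create_pan_cameras(8 )
    rays = camera.camera_rays
    print(rays.shape )  # torch.Size([1, 1280, 2, 3]): 20 views x 8x8 pixels
    directions = rays[:, :, 1, :]
    print(float(directions.norm(dim=-1 ).mean() ) )  # ~1.0: directions are unit vectors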
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features( self , waveform: np.array ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    F' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , List ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            input_dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            input_dict = {"""audio_values""": padded_audio_features}
        encoded_inputs = BatchFeature(data=input_dict , tensor_type=return_tensors )
        return encoded_inputs
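# Hedged smoke test for the extractor above: one second of random audio at the
# configured 44.1 kHz rate in, padded log-mel patches and a patch mask out.
if __name__ == "__main__":
    extractor = TvltFeatureExtractor()
    audio = np.random.randn(44_100 ).astype(np.float32 )
    feats = extractor(audio , sampling_rate=44_100 , return_tensors="""np""" )
    print(feats["""audio_values"""].shape , feats["""audio_mask"""].shape )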
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin ,unittest.TestCase ):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
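# The tester can also be exercised outside pytest; a minimal sketch that builds a
# config/inputs pair without loading any weights (flax must be installed).
if __name__ == "__main__":
    tester = FlaxDistilBertModelTester(None )
    config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    print(inputs_dict["""input_ids"""].shape )  # (13, 7) == (batch_size, seq_length)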
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
            assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
            assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
            assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
            assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
            assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
            assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3
    def test_full_loop_device( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
            assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
            assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
            assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
            assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
            assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
        else:
            assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
            assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
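# A hedged standalone sketch of the scheduler under test outside the harness; the
# random tensor stands in for a denoising model and shapes are illustrative.
if __name__ == "__main__":
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1_000 )
    scheduler.set_timesteps(10 )
    sample = torch.randn(1 , 3 , 8 , 8 ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample , t )
        model_output = torch.randn(1 , 3 , 8 , 8 )  # stand-in for a denoising model
        sample = scheduler.step(model_output , t , scaled ).prev_sample
    print(sample.shape )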
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        self.h = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
    @staticmethod
    def rotate( n , b ):
        '''simple docstring'''
        return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
    def padding( self ):
        '''simple docstring'''
        padding = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ):
        '''simple docstring'''
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block( self , block ):
        '''simple docstring'''
        w = list(struct.unpack(""">16L""" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.padding()
_SCREAMING_SNAKE_CASE = self.split_blocks()
for block in self.blocks:
_SCREAMING_SNAKE_CASE = self.expand_block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.h
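            # 80 rounds in four 20-round stages, each with its own boolean function f and constant k.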
for i in range(0 , 80 ):
if 0 <= i < 20:
_SCREAMING_SNAKE_CASE = (b & c) | ((~b) & d)
_SCREAMING_SNAKE_CASE = 0x5a_827_999
elif 20 <= i < 40:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0x6e_d9e_ba1
elif 40 <= i < 60:
_SCREAMING_SNAKE_CASE = (b & c) | (b & d) | (c & d)
_SCREAMING_SNAKE_CASE = 0x8f_1bb_cdc
elif 60 <= i < 80:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0xca_62c_1d6
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
self.rotate(UpperCAmelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(UpperCAmelCase_ , 30 ),
c,
d,
)
_SCREAMING_SNAKE_CASE = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = b"""Test String"""
    assert SHAaHash(snake_case__ ).final_hash() == hashlib.sha1(snake_case__ ).hexdigest() # noqa: S324
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
else:
_SCREAMING_SNAKE_CASE = bytes(snake_case__ ,"""utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 306
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __UpperCAmelCase :
__snake_case : List[str] = BlenderbotSmallConfig
__snake_case : str = {}
__snake_case : Tuple = "gelu"
def __init__( self: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Any=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Tuple=2 , UpperCAmelCase_: Dict=4 , UpperCAmelCase_: List[Any]=37 , UpperCAmelCase_: Union[str, Any]=0.1 , UpperCAmelCase_: Union[str, Any]=0.1 , UpperCAmelCase_: int=20 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: Any=1 , UpperCAmelCase_: List[str]=0 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE = prepare_blenderbot_small_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFBlenderbotSmallModel(config=UpperCAmelCase_ ).get_decoder()
_SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""]
_SCREAMING_SNAKE_CASE = input_ids[:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""]
_SCREAMING_SNAKE_CASE = 1
# first forward pass
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
        _SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(snake_case__ ,config.pad_token_id ) ,tf.int8 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.int8 ),
] ,axis=-1 ,)
if head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : str = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case : Tuple = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case : Optional[int] = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case : str = True
__snake_case : Any = False
__snake_case : List[str] = False
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
@require_tokenizers
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
__snake_case : List[Any] = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
__snake_case : Union[str, Any] = "facebook/blenderbot_small-90M"
@cached_property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , return_tensors="""tf""" )
_SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCAmelCase_ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 306
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
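        # The serialized pre-tokenizer may disagree with the requested add_prefix_space;
        # if it does, rebuild it with the new setting before tokenizing.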
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 306
| 1
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
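        # Route caller kwargs to the right pipeline stage: cropping and point options
        # feed preprocessing, thresholds and output flags feed postprocessing.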
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
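        # Pool the per-chunk masks, scores and boxes, then let the image processor run
        # cross-chunk filtering/NMS before assembling the final output dict.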
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
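        # Exercise both call signatures: a dict of named inputs and a positional list.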
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 306
| 1
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """file.csv"""
_SCREAMING_SNAKE_CASE = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(snake_case__ ,"""w""" ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def __lowerCamelCase ( snake_case__ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """malformed_file.csv"""
_SCREAMING_SNAKE_CASE = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(snake_case__ ,"""w""" ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """csv_with_image.csv"""
_SCREAMING_SNAKE_CASE = textwrap.dedent(
        F'image\n{image_file}\n' )
with open(snake_case__ ,"""w""" ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """csv_with_label.csv"""
_SCREAMING_SNAKE_CASE = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(snake_case__ ,"""w""" ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tmp_path / """csv_with_int_list.csv"""
_SCREAMING_SNAKE_CASE = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(snake_case__ ,"""w""" ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Csv()
_SCREAMING_SNAKE_CASE = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ ,match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
with open(snake_case__ ,encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = f.read().splitlines()[1]
_SCREAMING_SNAKE_CASE = Csv(encoding="""utf-8""" ,features=Features({"""image""": Image()} ) )
_SCREAMING_SNAKE_CASE = csv._generate_tables([[csv_file_with_image]] )
_SCREAMING_SNAKE_CASE = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
_SCREAMING_SNAKE_CASE = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
with open(snake_case__ ,encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = f.read().splitlines()[1:]
_SCREAMING_SNAKE_CASE = Csv(encoding="""utf-8""" ,features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
_SCREAMING_SNAKE_CASE = csv._generate_tables([[csv_file_with_label]] )
_SCREAMING_SNAKE_CASE = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
_SCREAMING_SNAKE_CASE = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def __lowerCamelCase ( snake_case__ ) -> List[str]:
"""simple docstring"""
    _SCREAMING_SNAKE_CASE = Csv(encoding="""utf-8""" ,sep=""",""" ,converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
_SCREAMING_SNAKE_CASE = csv._generate_tables([[csv_file_with_int_list]] )
_SCREAMING_SNAKE_CASE = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
_SCREAMING_SNAKE_CASE = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 306
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
| 306
| 1
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_SCREAMING_SNAKE_CASE = """xvjiarui/stable-diffusion-2-inpainting"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """Face of a yellow cat, high resolution, sitting on a park bench"""
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
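        # Duplicate the prompt and both images once per visible device for
        # data-parallel (pmap-style) sampling.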
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = num_samples * [init_image]
_SCREAMING_SNAKE_CASE = num_samples * [mask_image]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = jax.random.split(UpperCAmelCase_ , jax.device_count() )
_SCREAMING_SNAKE_CASE = shard(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = shard(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = shard(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipeline(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = output.images.reshape(UpperCAmelCase_ , 512 , 512 , 3 )
_SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1]
_SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE = jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 306
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __lowerCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [randint(-10_00 ,10_00 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE = randint(-50_00 ,50_00 )
return (arr, r)
UpperCamelCase = make_dataset()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(snake_case__ ,3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __lowerCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum1(*dataset)
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum2(*dataset)
"""
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[Any] = "gpt_neox"
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=50_432 , UpperCAmelCase_: Union[str, Any]=6_144 , UpperCAmelCase_: int=44 , UpperCAmelCase_: List[Any]=64 , UpperCAmelCase_: str=24_576 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: Any=0.25 , UpperCAmelCase_: Tuple=10_000 , UpperCAmelCase_: List[str]=0.0 , UpperCAmelCase_: List[str]=0.0 , UpperCAmelCase_: str=0.1 , UpperCAmelCase_: Dict=2_048 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: Any=1E-5 , UpperCAmelCase_: str=True , UpperCAmelCase_: List[Any]=0 , UpperCAmelCase_: int=2 , UpperCAmelCase_: List[Any]=False , UpperCAmelCase_: Any=True , UpperCAmelCase_: List[str]=None , **UpperCAmelCase_: int , ):
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = rotary_pct
_SCREAMING_SNAKE_CASE = rotary_emb_base
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = hidden_dropout
_SCREAMING_SNAKE_CASE = classifier_dropout
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = tie_word_embeddings
_SCREAMING_SNAKE_CASE = use_parallel_residual
_SCREAMING_SNAKE_CASE = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
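        # `rope_scaling` is optional; when present it must carry a `type` of "linear"
        # or "dynamic" and a numeric `factor` strictly greater than 1.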
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCAmelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F'got {self.rope_scaling}' )
_SCREAMING_SNAKE_CASE = self.rope_scaling.get("""type""" , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.rope_scaling.get("""factor""" , UpperCAmelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 306
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = BertJapaneseTokenizer
__snake_case : Optional[int] = False
__snake_case : Tuple = True
def UpperCamelCase ( self: Any ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase ( self: int , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
_SCREAMING_SNAKE_CASE = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_input_output_texts(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCAmelCase_ , """wb""" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , """rb""" ) as handle:
_SCREAMING_SNAKE_CASE = pickle.load(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
try:
_SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
try:
_SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
try:
_SCREAMING_SNAKE_CASE = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCAmelCase_ , """wb""" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , """rb""" ) as handle:
_SCREAMING_SNAKE_CASE = pickle.load(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCAmelCase_ , """wb""" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , """rb""" ) as handle:
_SCREAMING_SNAKE_CASE = pickle.load(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
_SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(UpperCAmelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(UpperCAmelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Dict = BertJapaneseTokenizer
__snake_case : Optional[int] = False
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase ( self: Optional[Any] , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
_SCREAMING_SNAKE_CASE = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
UpperCAmelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_SCREAMING_SNAKE_CASE = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
else:
_SCREAMING_SNAKE_CASE = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
_SCREAMING_SNAKE_CASE = ["""key_proj""", """value_proj""", """query_proj"""]
_SCREAMING_SNAKE_CASE = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE = prophet
_SCREAMING_SNAKE_CASE = prophet_old
else:
_SCREAMING_SNAKE_CASE = prophet.prophetnet
_SCREAMING_SNAKE_CASE = prophet_old.model
_SCREAMING_SNAKE_CASE = False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE = mapping[attribute]
if not hasattr(snake_case__ ,snake_case__ ) and len(snake_case__ ) > 0:
_SCREAMING_SNAKE_CASE = attribute
elif hasattr(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.weight
logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.bias
logger.info(F'{attribute} is initialized' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute in special_keys and hasattr(snake_case__ ,"""in_proj_weight""" ):
_SCREAMING_SNAKE_CASE = old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
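                    # in_proj_weight stacks the query/key/value projections along dim 0,
                    # so each projection occupies one embed_dim-sized slice:
                    # [:embed_dim] -> query, [embed_dim:2*embed_dim] -> key, [2*embed_dim:] -> value.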
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE = True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE = model[int(snake_case__ )]
_SCREAMING_SNAKE_CASE = old_model[int(snake_case__ )]
else:
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if old_attribute == "":
_SCREAMING_SNAKE_CASE = old_model
else:
if not hasattr(snake_case__ ,snake_case__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
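# Example invocation (the script name and paths are placeholders):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted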
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[float] = field(
default=0.0 ,metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__snake_case : bool = field(default=_UpperCAmelCase ,metadata={"help": "Whether to SortishSamler or not."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__snake_case : bool = field(default=_UpperCAmelCase ,metadata={"help": "whether to use adafactor"} )
__snake_case : Optional[float] = field(
default=_UpperCAmelCase ,metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=_UpperCAmelCase ,metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(default=_UpperCAmelCase ,metadata={"help": "Dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=_UpperCAmelCase ,metadata={"help": "Attention dropout probability. Goes into model.config."} )
__snake_case : Optional[str] = field(
default="linear" ,metadata={"help": F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} ,)
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(snake_case__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_SCREAMING_SNAKE_CASE = i + 1
else:
_SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[Any] = "switch_transformers"
__snake_case : Dict = ["past_key_values"]
__snake_case : Any = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self: Tuple , UpperCAmelCase_: Optional[Any]=32_128 , UpperCAmelCase_: Dict=768 , UpperCAmelCase_: List[Any]=64 , UpperCAmelCase_: Optional[Any]=2_048 , UpperCAmelCase_: Union[str, Any]=64 , UpperCAmelCase_: Optional[int]=12 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: List[str]=12 , UpperCAmelCase_: List[str]=3 , UpperCAmelCase_: List[str]=12 , UpperCAmelCase_: Optional[Any]=8 , UpperCAmelCase_: Any=False , UpperCAmelCase_: Optional[int]=0.01 , UpperCAmelCase_: Union[str, Any]="float32" , UpperCAmelCase_: Optional[Any]=False , UpperCAmelCase_: Dict=32 , UpperCAmelCase_: List[str]=128 , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Union[str, Any]=1E-6 , UpperCAmelCase_: Tuple=0.0_01 , UpperCAmelCase_: List[Any]=0.0_01 , UpperCAmelCase_: Optional[Any]=1.0 , UpperCAmelCase_: str="relu" , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=False , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=0 , UpperCAmelCase_: int=1 , **UpperCAmelCase_: str , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = d_kv
_SCREAMING_SNAKE_CASE = d_ff
_SCREAMING_SNAKE_CASE = num_sparse_encoder_layers
_SCREAMING_SNAKE_CASE = num_layers
_SCREAMING_SNAKE_CASE = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_SCREAMING_SNAKE_CASE = num_sparse_decoder_layers
        # This determines how many encoder layers sit between two consecutive sparse layers.
if self.num_sparse_encoder_layers > 0:
_SCREAMING_SNAKE_CASE = self.num_layers // self.num_sparse_encoder_layers
else:
_SCREAMING_SNAKE_CASE = self.num_layers # HACK: this will create 0 sparse layers
        # This determines how many decoder layers sit between two consecutive sparse layers.
if self.num_sparse_decoder_layers > 0:
_SCREAMING_SNAKE_CASE = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_SCREAMING_SNAKE_CASE = self.num_decoder_layers # HACK: this will create 0 sparse layers
_SCREAMING_SNAKE_CASE = num_heads
_SCREAMING_SNAKE_CASE = num_experts
_SCREAMING_SNAKE_CASE = expert_capacity
_SCREAMING_SNAKE_CASE = router_bias
_SCREAMING_SNAKE_CASE = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
_SCREAMING_SNAKE_CASE = router_dtype
_SCREAMING_SNAKE_CASE = router_ignore_padding_tokens
_SCREAMING_SNAKE_CASE = relative_attention_num_buckets
_SCREAMING_SNAKE_CASE = relative_attention_max_distance
_SCREAMING_SNAKE_CASE = dropout_rate
_SCREAMING_SNAKE_CASE = layer_norm_epsilon
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = feed_forward_proj
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = add_router_probs
_SCREAMING_SNAKE_CASE = router_z_loss_coef
_SCREAMING_SNAKE_CASE = router_aux_loss_coef
_SCREAMING_SNAKE_CASE = self.feed_forward_proj.split("""-""" )
_SCREAMING_SNAKE_CASE = act_info[-1]
_SCREAMING_SNAKE_CASE = act_info[0] == """gated"""
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_SCREAMING_SNAKE_CASE = """gelu_new"""
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
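# With the upstream SwitchTransformers defaults (num_layers=12,
# num_sparse_encoder_layers=3) the computed encoder_sparse_step is 12 // 3 = 4,
# i.e. every fourth encoder layer is a sparse mixture-of-experts layer; with
# num_sparse_encoder_layers=0 the step falls back to num_layers, which the HACK
# comment above notes yields no sparse layers.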
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''MobileNetV1Config'''
# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1_024, 7, 7]
# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = model.mobilenet_va
else:
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = """MobilenetV1/Conv2d_0/"""
_SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = i * 2
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_SCREAMING_SNAKE_CASE = model.classifier.weight
_SCREAMING_SNAKE_CASE = model.classifier.bias
return tf_to_pt_map
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_SCREAMING_SNAKE_CASE = tf.train.list_variables(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
_SCREAMING_SNAKE_CASE = tf.train.load_variable(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = array
# Build TF to PyTorch weights loading map
_SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(snake_case__ ,snake_case__ ,snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
_SCREAMING_SNAKE_CASE = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_SCREAMING_SNAKE_CASE = array.squeeze().transpose()
else:
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
_SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ ,snake_case__ )
tf_weights.pop(name + """/RMSProp""" ,snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" ,snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = features.shape[-2:]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.stride
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.kernel_size
if in_height % stride_height == 0:
_SCREAMING_SNAKE_CASE = max(kernel_height - stride_height ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
_SCREAMING_SNAKE_CASE = max(kernel_width - stride_width ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_width - (in_width % stride_width) ,0 )
_SCREAMING_SNAKE_CASE = pad_along_width // 2
_SCREAMING_SNAKE_CASE = pad_along_width - pad_left
_SCREAMING_SNAKE_CASE = pad_along_height // 2
_SCREAMING_SNAKE_CASE = pad_along_height - pad_top
_SCREAMING_SNAKE_CASE = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case__ ,snake_case__ ,"""constant""" ,0.0 )
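# A small numeric check of the TF "SAME" padding rule implemented above (sizes
# are illustrative): for in_height=7, stride_height=2, kernel_height=3 we get
# pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top=1 / pad_bottom=1,
# so the convolution output height becomes (7 + 2 - 3) // 2 + 1 = 4 = ceil(7 / 2),
# matching TensorFlow's SAME behaviour.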
class __UpperCAmelCase (nn.Module ):
def __init__( self: Optional[Any] , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[bool] = True , UpperCAmelCase_: Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
_SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_SCREAMING_SNAKE_CASE = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , )
if use_normalization:
_SCREAMING_SNAKE_CASE = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
else:
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_SCREAMING_SNAKE_CASE = apply_tf_padding(UpperCAmelCase_ , self.convolution )
_SCREAMING_SNAKE_CASE = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
_SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
_SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase_ )
return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
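# A hedged end-to-end sketch for the classification head above; the class names
# below are the upstream (unmangled) transformers names, and the image path is a
# placeholder:
#
#   from PIL import Image
#   import torch
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])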
import numpy as np
from transformers import Pipeline
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.max(snake_case__ ,axis=-1 ,keepdims=snake_case__ )
_SCREAMING_SNAKE_CASE = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=snake_case__ )
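# The max-subtraction above is the standard numerically stable softmax: e.g. for
# outputs=[1000.0, 1001.0] a direct np.exp would overflow, but shifting by the
# row maximum evaluates exp([-1.0, 0.0]) / sum ~= [0.269, 0.731] instead.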
class __UpperCAmelCase (_UpperCAmelCase ):
def UpperCamelCase ( self: Union[str, Any] , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
if "second_text" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=None ):
'''simple docstring'''
return self.tokenizer(UpperCAmelCase_ , text_pair=UpperCAmelCase_ , return_tensors=self.framework )
def UpperCamelCase ( self: int , UpperCAmelCase_: Tuple ):
'''simple docstring'''
return self.model(**UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_outputs.logits[0].numpy()
_SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.model.config.idalabel[best_class]
_SCREAMING_SNAKE_CASE = probabilities[best_class].item()
_SCREAMING_SNAKE_CASE = logits.tolist()
return {"label": label, "score": score, "logits": logits}
def __lowerCamelCase ( snake_case__ ) -> list:
"""simple docstring"""
def merge(snake_case__ ,snake_case__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
_SCREAMING_SNAKE_CASE = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
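# Worked example: merge_sort([5, 2, 4, 1]) splits into [5, 2] and [4, 1], sorts
# each half recursively to [2, 5] and [1, 4], and merge() then interleaves them
# into [1, 2, 4, 5]; the total running time is O(n log n).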
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 250
_SCREAMING_SNAKE_CASE = ids_tensor((batch_size, length) , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.ones((batch_size, length) , device=UpperCAmelCase_ , dtype=torch.float ) / length
return input_ids, scores
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(5 )
_SCREAMING_SNAKE_CASE = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MaxLengthCriteria(max_length=10 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_tensors(5 )
_SCREAMING_SNAKE_CASE = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCamelCase ( self: int ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCAmelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_SCREAMING_SNAKE_CASE = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
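# A hedged usage sketch showing how the criteria exercised above plug into
# generation (model name and prompt are illustrative):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   outputs = model.generate(
#       **inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
#   )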
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length, 2) ,snake_case__ )
else:
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length) ,snake_case__ )
for i, tensor in enumerate(snake_case__ ):
if padding_side == "right":
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
return out_tensor.tolist()
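# Worked intent of padding_tensor above (the collapsed assignments fill
# out_tensor row by row in the upstream code): with sequence_length=4,
# padding_index=-1 and padding_side="right", the ragged input [[1, 2], [3]]
# pads to [[1, 2, -1, -1], [3, -1, -1, -1]].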
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : PreTrainedTokenizerBase
__snake_case : Union[bool, str, PaddingStrategy] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : int = -100
__snake_case : str = "pt"
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
import torch
_SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels"""
_SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1]
_SCREAMING_SNAKE_CASE = self.tokenizer.padding_side
if padding_side == "right":
_SCREAMING_SNAKE_CASE = [
list(UpperCAmelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) for label in labels
]
else:
_SCREAMING_SNAKE_CASE = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) + list(UpperCAmelCase_ ) for label in labels
]
_SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , -1 , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , (-1, -1) , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {k: torch.tensor(UpperCAmelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("""String lengths must match!""" )
_SCREAMING_SNAKE_CASE = 0
    for chara, charb in zip(snake_case__ ,snake_case__ ):
        if chara != charb:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
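# Worked example (the function above computes the classic Hamming distance): for
# "karolin" and "kathrin" the characters differ at three positions (r/t, o/h,
# l/r), so the returned count is 3.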
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
    # intermediate weights
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
_SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""]
set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
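
# Usage sketch (illustrative, not part of the original script): the converter can
# also be called programmatically; the three paths below are hypothetical
# placeholders for a trax Reformer pickle, its config json, and the output file.
#
#     convert_trax_checkpoint_to_pytorch(
#         trax_model_pkl_path="./reformer_weights.pkl",
#         config_file="./reformer_config.json",
#         pytorch_dump_path="./pytorch_model.bin",
#     )
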
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
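
# How the lazy init behaves in practice (illustrative sketch, not part of the
# original file): nothing listed under "modeling_unispeech" is imported until an
# attribute is first accessed, so a line like the one below is what finally
# triggers loading the torch-backed module.
#
#     from transformers import UniSpeechModel  # resolved lazily via _LazyModule
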
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
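
# To exercise just the fast tests above (illustrative command; the file path
# assumes the standard diffusers test layout and may differ in other checkouts):
#
#     pytest tests/pipelines/text_to_video/test_text_to_video.py -k "FastTests"
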
def solution(pence: int = 200) -> int:
    """Count the ways `pence` pence can be made from standard UK coins,
    using bottom-up dynamic programming over the coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
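
# Worked check of the DP above (added for illustration): for 5 pence the only
# combinations are 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1, so:
#
#     assert solution(5) == 4
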
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
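
# Quick illustration (not part of the test file) of what the masked-LM
# integration test exercises, assuming the public checkpoint is reachable:
#
#     model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(torch.tensor([[0, 1, 2, 3, 4, 5]])).logits  # shape (1, 6, 33)
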
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise to a matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a kernel holding each cell's distance from the centre, then gaussian-weight it.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
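
# Headless usage sketch (illustrative, not in the original): the filter itself
# only needs numpy, so it can be exercised on a synthetic image without the
# cv2 display calls above.
#
#     noisy = np.random.default_rng(0).random((32, 32)).astype("float32")
#     smoothed = bilateral_filter(noisy, 1.0, 1.0, kernel_size=5)
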
import random


def rabin_miller(num: int) -> bool:
    # Probabilistic Rabin-Miller primality test with 5 random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
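
# Sanity-check sketch (illustrative, not in the original): the classifications
# below follow directly from the functions defined above.
#
#     assert is_prime_low_num(997) is True     # found in the low-primes table
#     assert is_prime_low_num(1000) is False   # divisible by 2
#     assert is_prime_low_num(1009) is True    # falls through to rabin_miller
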