from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
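# Usage sketch (illustrative, not part of the original module): a ResNet-18-style
# configuration; `stage_names` is derived from `depths`.
#
#   config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage4"])
#   config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']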
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
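# Usage sketch (illustrative; the repo id is hypothetical):
#
#   url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
#   # -> roughly https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv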
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative segment tree: leaves live at st[N..2N-1], node p covers st[2p] and st[2p+1]."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal nodes, then leaves
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and recompute the ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Fold fn over the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2

        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range against a plain reduce over the slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    # Friedmann equation: H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL)
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Backtracking: every path that uses all indices exactly once is a permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            # the text targets feed the decoder, so rename the keys accordingly
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
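# Usage sketch (illustrative; the checkpoint name is an example):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # image features plus the renamed `decoder_input_ids` / `decoder_attention_mask`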
"""Convert TrOCR checkpoints from the unilm repository to the VisionEncoderDecoder format."""

import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
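# Example invocation (illustrative; the script file name is an assumption):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten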
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: take items in decreasing value/weight order.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # cumulative weights of the sorted items
    k = bisect(acc, w)  # number of items that fit completely
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
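# Another worked example (added for illustration): with capacity 25, the greedy
# solution takes all of the best-ratio item (value 60, weight 10) plus 15/20 of
# the next one: 60 + 15 * (100 / 20) = 135.0.
#
#   frac_knapsack([60, 100, 120], [10, 20, 30], 25, 3)  # -> 135.0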
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Pixel coordinates of shape (height * width, 2), in (x, y) order."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1], then scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # the original omitted this required field, which would raise a TypeError
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
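# Usage sketch (illustrative): 20 cameras panning around the origin at a
# 64 x 64 resolution; `camera_rays` flattens every view into (origin, direction)
# pairs.
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]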
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix with even dimensions into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
def upper(word: str) -> str:
    """Convert a string to ASCII uppercase; characters outside a-z pass through unchanged."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
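# Worked example (added for illustration):
#
#   upper("hello, World_123!")  # -> 'HELLO, WORLD_123!'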
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2*phi) and c2 = cos(2*phi), from the tangent half-angle identities
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
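# Usage sketch (illustrative; the checkpoint name is an example):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tokenizer.tokenize("Hello, world!")
#   # -> SentencePiece pieces, e.g. ['▁hello', ',', '▁world', '!']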
from manim import *


class CheckpointToDiskScene(Scene):
    # Note: the original class name and several layout constants (direction
    # arguments, colors, FadeOut targets) were destroyed by the identifier
    # scrambling in this dump; the names and constants below are a plausible
    # reconstruction.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])

        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText(
            "Then, the checkpoint is removed from memory\nthrough garbage collection.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(key, key_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
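# Rendering sketch (illustrative; assumes the manim CLI is installed, this file
# is saved as stage.py, and the scene name matches the reconstructed class
# above):
#
#   manim -pql stage.py CheckpointToDiskScene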
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discounted sum of cash flows; flow i is assumed to arrive i periods from now."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
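# Worked example (added for illustration): three annual cash flows of 100 at a
# 5% discount rate; the first flow is treated as occurring today (i == 0):
#
#   present_value(0.05, [100, 100, 100])  # -> 285.94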
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
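# Usage sketch (illustrative, not part of the original module):
#
#   config = Mask2FormerConfig()            # default Swin backbone
#   config.backbone_config.model_type       # -> 'swin'
#   config.hidden_size                      # -> 256, aliased to `hidden_dim` via attribute_map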
from __future__ import annotations
from random import random
class A_ :
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : int | None = None):
__lowerCamelCase : Union[str, Any] = value
__lowerCamelCase : int = random()
__lowerCamelCase : Node | None = None
__lowerCamelCase : Node | None = None
def __repr__( self : List[Any]):
from pprint import pformat
if self.left is None and self.right is None:
return F"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{F"{self.value}: {self.prior:.5}": (self.left, self.right)} ,indent=1)
def __str__( self : Union[str, Any]):
__lowerCamelCase : Optional[int] = str(self.value) + ' '
__lowerCamelCase : Optional[Any] = str(self.left or '')
__lowerCamelCase : List[Any] = str(self.right or '')
return value + left + right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = split(root.left , lowerCamelCase__ )
return left, root
else:
__lowerCamelCase , __lowerCamelCase : List[str] = split(root.right , lowerCamelCase__ )
return root, right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__lowerCamelCase : Optional[Any] = merge(left.right , lowerCamelCase__ )
return left
else:
__lowerCamelCase : List[str] = merge(lowerCamelCase__ , right.left )
return right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None:
__lowerCamelCase : List[Any] = Node(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Optional[int] = split(lowerCamelCase__ , lowerCamelCase__ )
return merge(merge(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None:
__lowerCamelCase , __lowerCamelCase : List[str] = split(lowerCamelCase__ , value - 1 )
__lowerCamelCase , __lowerCamelCase : Any = split(lowerCamelCase__ , lowerCamelCase__ )
return merge(lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Node | None:
for arg in args.split():
if arg[0] == "+":
__lowerCamelCase : str = insert(lowerCamelCase__ , int(arg[1:] ) )
elif arg[0] == "-":
__lowerCamelCase : List[Any] = erase(lowerCamelCase__ , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def SCREAMING_SNAKE_CASE__ ( ) -> None:
__lowerCamelCase : int = None
print(
'enter numbers to create a tree; \'+ value\' inserts a value into the treap, '
'\'- value\' erases all nodes with that value. \'q\' to quit. ' )
__lowerCamelCase : Optional[int] = input()
while args != "q":
__lowerCamelCase : Optional[int] = interact_treap(lowerCamelCase__ , lowerCamelCase__ )
print(lowerCamelCase__ )
__lowerCamelCase : int = input()
print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 73 |
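# A runnable sketch of the split/merge treap pattern implemented above; the
# class, function, and variable names here are my own choices for illustration.
from __future__ import annotations
from random import random

class TreapNode:
    def __init__(self, value: int) -> None:
        self.value = value
        self.prior = random()  # BST order on value, heap order on this priority
        self.left: TreapNode | None = None
        self.right: TreapNode | None = None

def split(root: TreapNode | None, value: int) -> tuple[TreapNode | None, TreapNode | None]:
    """Split into (keys <= value, keys > value)."""
    if root is None:
        return None, None
    if value < root.value:
        left, root.left = split(root.left, value)
        return left, root
    root.right, right = split(root.right, value)
    return root, right

def merge(left: TreapNode | None, right: TreapNode | None) -> TreapNode | None:
    """Merge two treaps where every key in `left` precedes every key in `right`."""
    if left is None or right is None:
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right

def insert(root: TreapNode | None, value: int) -> TreapNode | None:
    left, right = split(root, value)
    return merge(merge(left, TreapNode(value)), right)

root = None
for v in (5, 3, 8, 1):
    root = insert(root, v)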
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
a ={
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> dict[str, int]:
__lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return x[0]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
__lowerCamelCase : List[str] = get_letter_count(lowerCamelCase__ )
__lowerCamelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCamelCase__ )
__lowerCamelCase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = ''.join(freq_to_letter[freq] )
__lowerCamelCase : int = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCamelCase__ , reverse=lowerCamelCase__ )
__lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : str = get_frequency_order(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
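# A runnable distillation of the frequency-order logic above, with descriptive
# names of my own. It ranks the message's letters from most to least frequent
# (ties broken by standard English order) and scores the overlap of the six
# most/least common letters against ETAOIN.
import string

ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'

def get_letter_count(message: str) -> dict:
    counts = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in counts:
            counts[letter] += 1
    return counts

def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letters = {freq: [] for freq in letter_to_freq.values()}
    for letter in string.ascii_uppercase:
        freq_to_letters[letter_to_freq[letter]].append(letter)
    # within one frequency bucket, order letters by their ETAOIN rank
    buckets = {
        freq: ''.join(sorted(letters, key=ETAOIN.find, reverse=True))
        for freq, letters in freq_to_letters.items()
    }
    return ''.join(s for _, s in sorted(buckets.items(), key=lambda p: p[0], reverse=True))

def english_freq_match_score(message: str) -> int:
    order = get_frequency_order(message)
    return sum(c in order[:6] for c in ETAOIN[:6]) + sum(c in order[-6:] for c in ETAOIN[-6:])

print(english_freq_match_score('Hello World'))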
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
# word like '180' or '身高' or '神'
for char in word:
__lowerCamelCase : int = ord(lowerCamelCase__ )
if not _is_chinese_char(lowerCamelCase__ ):
return 0
return 1
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
__lowerCamelCase : Any = set()
for token in tokens:
__lowerCamelCase : Tuple = len(lowerCamelCase__ ) > 1 and is_chinese(lowerCamelCase__ )
if chinese_word:
word_set.add(lowerCamelCase__ )
__lowerCamelCase : List[Any] = list(lowerCamelCase__ )
return word_list
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
if not chinese_word_set:
return bert_tokens
__lowerCamelCase : List[str] = max([len(lowerCamelCase__ ) for w in chinese_word_set] )
__lowerCamelCase : Dict = bert_tokens
__lowerCamelCase , __lowerCamelCase : Optional[Any] = 0, len(lowerCamelCase__ )
while start < end:
__lowerCamelCase : Tuple = True
if is_chinese(bert_word[start] ):
__lowerCamelCase : List[str] = min(end - start , lowerCamelCase__ )
for i in range(lowerCamelCase__ , 1 , -1 ):
__lowerCamelCase : Dict = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowerCamelCase : List[str] = '##' + bert_word[j]
__lowerCamelCase : Optional[Any] = start + i
__lowerCamelCase : Union[str, Any] = False
break
if single_word:
start += 1
return bert_word
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : int = []
for i in range(0 , len(lowerCamelCase__ ) , 1_0_0 ):
__lowerCamelCase : int = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
__lowerCamelCase : Optional[int] = [get_chinese_word(lowerCamelCase__ ) for r in res]
ltp_res.extend(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
__lowerCamelCase : Tuple = []
for i in range(0 , len(lowerCamelCase__ ) , 1_0_0 ):
__lowerCamelCase : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
__lowerCamelCase : Tuple = []
for input_ids, chinese_word in zip(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : List[Any] = []
for id in input_ids:
__lowerCamelCase : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase__ )
input_tokens.append(lowerCamelCase__ )
__lowerCamelCase : List[Any] = add_sub_symbol(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Dict = []
# We only save positions of Chinese subwords that start with "##", meaning they continue a whole word.
for i, token in enumerate(lowerCamelCase__ ):
if token[:2] == "##":
__lowerCamelCase : List[str] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase__ ) == 1 and _is_chinese_char(ord(lowerCamelCase__ ) ):
ref_id.append(lowerCamelCase__ )
ref_ids.append(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
return ref_ids
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# If we want to fine-tune this model, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase : Any = f.readlines()
__lowerCamelCase : str = [line.strip() for line in data if len(lowerCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__lowerCamelCase : Any = LTP(args.ltp ) # faster in GPU device
__lowerCamelCase : List[str] = BertTokenizer.from_pretrained(args.bert )
__lowerCamelCase : List[str] = prepare_ref(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
__lowerCamelCase : List[str] = [json.dumps(lowerCamelCase__ ) + '\n' for ref in ref_ids]
f.writelines(lowerCamelCase__ )
if __name__ == "__main__":
a =argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
a =parser.parse_args()
main(args)
| 73 |
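# The heart of the script above is marking characters that continue a known
# Chinese word with the '##' subword prefix, so whole-word masking treats the
# word as one unit. A small self-contained sketch of that marking step (the
# helper name is mine; the greedy longest-match strategy mirrors the logic above):
def mark_whole_words(tokens: list, words: set) -> list:
    out = list(tokens)
    max_len = max((len(w) for w in words), default=0)
    start = 0
    while start < len(out):
        matched = False
        # try the longest candidate first, down to two characters
        for i in range(min(max_len, len(out) - start), 1, -1):
            if ''.join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = '##' + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out

print(mark_whole_words(list('身高180'), {'身高'}))  # ['身', '##高', '1', '8', '0']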
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
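# Why a fixture module like the one above imports the same name several ways:
# patching replaces one binding at a time. A minimal illustration with
# unittest.mock (my example, not the project's actual test):
import os
from os.path import join
from unittest.mock import patch

with patch('os.path.join', lambda *parts: 'PATCHED'):
    print(os.path.join('a', 'b'))  # 'PATCHED' -- looked up through os.path at call time
    print(join('a', 'b'))          # unpatched -- `join` was bound at import time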
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
return EnvironmentCommand()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
return EnvironmentCommand(args.accelerate_config_file )
class A_ ( SCREAMING_SNAKE_CASE ):
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : ArgumentParser):
__lowerCamelCase : List[str] = parser.add_parser('env')
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__)
download_parser.add_argument(
'--accelerate-config_file' ,default=SCREAMING_SNAKE_CASE__ ,help='The accelerate config file to use for the default values in the launching script.' ,)
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__)
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,*SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : Union[str, Any] = accelerate_config_file
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = 'not installed'
if is_safetensors_available():
import safetensors
__lowerCamelCase : Tuple = safetensors.__version__
elif importlib.util.find_spec('safetensors') is not None:
import safetensors
__lowerCamelCase : Union[str, Any] = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
__lowerCamelCase : str = 'not installed'
__lowerCamelCase : Optional[int] = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__lowerCamelCase : Optional[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[Any] = load_config_from_file(self._accelerate_config_file).to_dict()
__lowerCamelCase : Optional[int] = (
'\n'.join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else F"\t{accelerate_config}"
)
__lowerCamelCase : Dict = 'not installed'
__lowerCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
__lowerCamelCase : str = torch.__version__
__lowerCamelCase : str = torch.cuda.is_available()
__lowerCamelCase : Any = 'not installed'
__lowerCamelCase : Tuple = 'NA'
if is_tf_available():
import tensorflow as tf
__lowerCamelCase : Optional[Any] = tf.__version__
try:
# deprecated in v2.1
__lowerCamelCase : int = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__lowerCamelCase : Tuple = bool(tf.config.list_physical_devices('GPU'))
__lowerCamelCase : Union[str, Any] = 'not installed'
__lowerCamelCase : int = 'not installed'
__lowerCamelCase : Optional[Any] = 'not installed'
__lowerCamelCase : Any = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
__lowerCamelCase : Any = flax.__version__
__lowerCamelCase : Optional[Any] = jax.__version__
__lowerCamelCase : Union[str, Any] = jaxlib.__version__
__lowerCamelCase : Any = jax.lib.xla_bridge.get_backend().platform
__lowerCamelCase : List[Any] = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F"{safetensors_version}",
'Accelerate version': F"{accelerate_version}",
'Accelerate config': F"{accelerate_config_str}",
'PyTorch version (GPU?)': F"{pt_version} ({pt_cuda_available})",
'Tensorflow version (GPU?)': F"{tf_version} ({tf_cuda_available})",
'Flax version (CPU?/GPU?/TPU?)': F"{flax_version} ({jax_backend})",
'Jax version': F"{jax_version}",
'JaxLib version': F"{jaxlib_version}",
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(SCREAMING_SNAKE_CASE__))
return info
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Any):
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 73 |
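# The report above follows one recurring pattern: import an optional
# dependency if possible and record its version, else a placeholder. The same
# idea as a compact generic helper (my own sketch, not the CLI's API):
import importlib

def probe_version(module_name: str) -> str:
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return 'not installed'
    return getattr(module, '__version__', 'unknown version')

for name in ('torch', 'tensorflow', 'flax', 'definitely_not_a_module'):
    print(f'- {name}: {probe_version(name)}')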
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
for i in range(0 , lowerCamelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
for i in range(lowerCamelCase__ , 0 , -1 ):
for _ in range(lowerCamelCase__ , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCamelCase__ ) # upper half
reverse_floyd(lowerCamelCase__ ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
a =1
while K:
a =int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a =int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 73 | 1 |
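# An equivalent diamond built with str.center for comparison; unlike the
# version above, the lower half stays aligned because each row is centered
# against the widest row (the middle row prints twice, matching
# floyd + reverse_floyd):
def centered_diamond(n: int) -> None:
    if n <= 0:
        print('nothing to print')
        return
    rows = [('* ' * i).rstrip() for i in range(1, n + 1)]
    width = len(rows[-1])
    for row in rows + rows[::-1]:
        print(row.center(width))

centered_diamond(3)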
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
__lowerCamelCase : List[Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__lowerCamelCase : Optional[int] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__lowerCamelCase : int = 4
__lowerCamelCase : List[str] = 4_8
__lowerCamelCase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__lowerCamelCase : str = [6, 6, 6, 6]
__lowerCamelCase : Optional[int] = 6_0
__lowerCamelCase : Union[str, Any] = [6, 6, 6, 6]
__lowerCamelCase : List[str] = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__lowerCamelCase : Optional[Any] = 4
__lowerCamelCase : List[Any] = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__lowerCamelCase : Tuple = 1
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : List[str] = 1_2_6
__lowerCamelCase : List[Any] = 7
__lowerCamelCase : Tuple = 255.0
__lowerCamelCase : Any = ''
return config
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
if "patch_embed.proj" in name and "layers" not in name:
__lowerCamelCase : Dict = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase : Tuple = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__lowerCamelCase : str = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__lowerCamelCase : Tuple = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__lowerCamelCase : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowerCamelCase : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowerCamelCase : str = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowerCamelCase : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowerCamelCase : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowerCamelCase : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__lowerCamelCase : Any = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__lowerCamelCase : Optional[Any] = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__lowerCamelCase : int = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__lowerCamelCase : List[Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__lowerCamelCase : str = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__lowerCamelCase : Dict = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase : Dict = 'layernorm.bias'
if "conv_first" in name:
__lowerCamelCase : Optional[int] = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__lowerCamelCase : Any = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__lowerCamelCase : List[Any] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__lowerCamelCase : Dict = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__lowerCamelCase : List[Any] = name.replace('upsample.2' , 'upsample.convolution_1' )
__lowerCamelCase : Union[str, Any] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__lowerCamelCase : int = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__lowerCamelCase : List[str] = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__lowerCamelCase : List[Any] = 'swin2sr.' + name
return name
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
for key in orig_state_dict.copy().keys():
__lowerCamelCase : int = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
__lowerCamelCase : Dict = key.split('.' )
__lowerCamelCase : Dict = int(key_split[1] )
__lowerCamelCase : Optional[Any] = int(key_split[4] )
__lowerCamelCase : List[str] = config.embed_dim
if "weight" in key:
__lowerCamelCase : Optional[int] = val[:dim, :]
__lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :]
__lowerCamelCase : str = val[-dim:, :]
else:
__lowerCamelCase : List[str] = val[:dim]
__lowerCamelCase : Union[str, Any] = val[dim : dim * 2]
__lowerCamelCase : Union[str, Any] = val[-dim:]
pass
else:
__lowerCamelCase : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = get_config(lowerCamelCase__ )
__lowerCamelCase : List[Any] = SwinaSRForImageSuperResolution(lowerCamelCase__ )
model.eval()
__lowerCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='cpu' )
__lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(lowerCamelCase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
__lowerCamelCase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__lowerCamelCase : int = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert('RGB' )
__lowerCamelCase : Union[str, Any] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__lowerCamelCase : str = 1_2_6 if 'Jpeg' in checkpoint_url else 2_5_6
__lowerCamelCase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowerCamelCase : Union[str, Any] = transforms(lowerCamelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
__lowerCamelCase : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )
__lowerCamelCase : Tuple = model(lowerCamelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__lowerCamelCase : Tuple = torch.Size([1, 3, 5_1_2, 5_1_2] )
__lowerCamelCase : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__lowerCamelCase : List[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
__lowerCamelCase : Dict = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__lowerCamelCase : Optional[int] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
__lowerCamelCase : Dict = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.Size([1, 3, 5_1_2, 5_1_2] )
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
__lowerCamelCase : List[str] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-3 )
print('Looks ok!' )
__lowerCamelCase : Optional[Any] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__lowerCamelCase : str = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
a =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
a =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 73 |
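# Checkpoint conversion scripts like the one above reduce to three moves:
# rewrite every key, split fused qkv tensors into query/key/value, then load
# with strict sanity checks. A toy version of the first two moves (key names
# and shapes invented for illustration):
import torch

RENAMES = [('patch_embed.proj', 'embeddings.projection'), ('attn.proj', 'attention.output')]

def rename_key(name: str) -> str:
    for old, new in RENAMES:
        name = name.replace(old, new)
    return name

def convert_state_dict(state_dict: dict, dim: int) -> dict:
    out = {}
    for key, val in state_dict.items():
        if 'qkv.weight' in key:
            base = rename_key(key).replace('qkv.weight', '')
            out[base + 'query.weight'] = val[:dim]
            out[base + 'key.weight'] = val[dim : 2 * dim]
            out[base + 'value.weight'] = val[-dim:]
        else:
            out[rename_key(key)] = val
    return out

sd = {'patch_embed.proj.weight': torch.zeros(4, 4), 'block.qkv.weight': torch.zeros(12, 4)}
print(sorted(convert_state_dict(sd, dim=4)))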
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : List[Any] = '''AutoImageProcessor'''
_UpperCAmelCase : Dict = '''AutoTokenizer'''
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Union[str, Any] = kwargs.pop('feature_extractor')
__lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.image_processor
__lowerCamelCase : Optional[int] = False
def __call__( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = kwargs.pop('images' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = kwargs.pop('text' ,SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__lowerCamelCase : int = args[0]
__lowerCamelCase : List[str] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
if images is not None:
__lowerCamelCase : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None:
__lowerCamelCase : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : Optional[Any] = encodings['input_ids']
return inputs
def lowerCAmelCase ( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Any):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@contextmanager
def lowerCAmelCase ( self : Tuple):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.')
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = self.tokenizer
yield
__lowerCamelCase : Tuple = self.image_processor
__lowerCamelCase : Tuple = False
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : List[Any]=None):
if added_vocab is None:
__lowerCamelCase : str = self.tokenizer.get_added_vocab()
__lowerCamelCase : Union[str, Any] = {}
while tokens:
__lowerCamelCase : Tuple = re.search(R'<s_(.*?)>' ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if start_token is None:
break
__lowerCamelCase : Dict = start_token.group(1)
__lowerCamelCase : List[str] = re.search(RF"</s_{key}>" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
__lowerCamelCase : Optional[int] = start_token.group()
if end_token is None:
__lowerCamelCase : List[Any] = tokens.replace(SCREAMING_SNAKE_CASE__ ,'')
else:
__lowerCamelCase : Tuple = end_token.group()
__lowerCamelCase : int = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = re.search(F"{start_token_escaped}(.*?){end_token_escaped}" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if content is not None:
__lowerCamelCase : List[Any] = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCamelCase : str = self.tokenajson(SCREAMING_SNAKE_CASE__ ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if value:
if len(SCREAMING_SNAKE_CASE__) == 1:
__lowerCamelCase : Tuple = value[0]
__lowerCamelCase : int = value
else: # leaf nodes
__lowerCamelCase : Tuple = []
for leaf in content.split(R'<sep/>'):
__lowerCamelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCamelCase : str = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE__)
if len(output[key]) == 1:
__lowerCamelCase : Dict = output[key][0]
__lowerCamelCase : Dict = tokens[tokens.find(SCREAMING_SNAKE_CASE__) + len(SCREAMING_SNAKE_CASE__) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase ( self : List[str]):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[Any]):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor
| 73 | 1 |
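# The tokenajson method above decodes tag sequences such as
# '<s_menu><s_name>latte</s_name></s_menu>' into nested dicts. A compact
# recursive sketch of the same grammar (simplified: no <sep/> handling and no
# added-vocabulary special cases):
import re

def token2json(tokens: str) -> dict:
    output: dict = {}
    while True:
        start = re.search(r'<s_(.*?)>', tokens)
        if start is None:
            break
        key = start.group(1)
        end = re.search(re.escape(f'</s_{key}>'), tokens)
        if end is None:  # unmatched opening tag: drop it and continue
            tokens = tokens.replace(start.group(), '', 1)
            continue
        inner = re.search(re.escape(start.group()) + r'(.*?)' + re.escape(end.group()), tokens, re.DOTALL)
        if inner is None:
            break
        content = inner.group(1).strip()
        output[key] = token2json(content) if '<s_' in content else content
        tokens = tokens[end.end():]
    return output

print(token2json('<s_menu><s_name>latte</s_name><s_price>3.50</s_price></s_menu>'))
# {'menu': {'name': 'latte', 'price': '3.50'}}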
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = '''swin2sr'''
_UpperCAmelCase : Dict = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_8_0 ,SCREAMING_SNAKE_CASE__ : Tuple=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Tuple=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Any=2.0 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : Any="gelu" ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Dict=0.02 ,SCREAMING_SNAKE_CASE__ : Any=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1.0 ,SCREAMING_SNAKE_CASE__ : str="1conv" ,SCREAMING_SNAKE_CASE__ : List[str]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Any ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Optional[int] = embed_dim
__lowerCamelCase : int = depths
__lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = num_heads
__lowerCamelCase : Tuple = window_size
__lowerCamelCase : Optional[int] = mlp_ratio
__lowerCamelCase : List[Any] = qkv_bias
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : str = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = drop_path_rate
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : Union[str, Any] = use_absolute_embeddings
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Dict = initializer_range
__lowerCamelCase : Any = upscale
__lowerCamelCase : Any = img_range
__lowerCamelCase : List[str] = resi_connection
__lowerCamelCase : Optional[Any] = upsampler
| 73 |
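# The attribute_map above lets generic code read config.hidden_size even
# though this config stores embed_dim. The read-side aliasing mechanism, in
# miniature (the real PretrainedConfig also intercepts writes; this sketch
# covers reads only):
class AliasedConfig:
    attribute_map = {'hidden_size': 'embed_dim', 'num_hidden_layers': 'num_layers'}

    def __init__(self, embed_dim=180, num_layers=6):
        self.embed_dim = embed_dim
        self.num_layers = num_layers

    def __getattr__(self, name):
        # only called when normal lookup fails, so real attributes win
        alias = self.attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_hidden_layers)  # 180 6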
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Dict = len(lowerCamelCase__ ) - 1
while left <= right:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase : str = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase__ ):
return None
__lowerCamelCase : Tuple = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__lowerCamelCase : List[Any] = left
__lowerCamelCase : Tuple = point
elif point > right:
__lowerCamelCase : Dict = right
__lowerCamelCase : str = point
else:
if item < current_item:
__lowerCamelCase : Dict = point - 1
else:
__lowerCamelCase : Dict = point + 1
return None
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , point - 1 )
else:
return interpolation_search_by_recursion(
lowerCamelCase__ , lowerCamelCase__ , point + 1 , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
if collection != sorted(lowerCamelCase__ ):
raise ValueError('Collection must be sorted in ascending order' )
return True
if __name__ == "__main__":
import sys
a =0
if debug == 1:
a =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
a =67
a =interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 73 | 1 |
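# A runnable distillation of the iterative search above: instead of always
# probing the midpoint, the probe interpolates where the item should sit
# between the window's boundary values. This variant simplifies the boundary
# updates but keeps the same idea:
def interpolation_search(sorted_collection, item):
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        lo, hi = sorted_collection[left], sorted_collection[right]
        if item < lo or item > hi:  # item cannot be inside this window
            return None
        if lo == hi:  # flat window: avoid division by zero
            return left
        point = left + (item - lo) * (right - left) // (hi - lo)  # stays in [left, right]
        if sorted_collection[point] == item:
            return point
        if sorted_collection[point] < item:
            left = point + 1
        else:
            right = point - 1
    return None

assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66) == 5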
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,):
__lowerCamelCase : int = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : List[Any] = is_training
__lowerCamelCase : Tuple = use_attention_mask
__lowerCamelCase : List[str] = use_token_type_ids
__lowerCamelCase : Any = use_labels
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[int] = num_choices
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
__lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : str = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,)
return config, input_ids, attention_mask
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs
__lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Dict = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = FlaxDistilBertModelTester(self)
@slow
def lowerCAmelCase ( self : int):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@require_flax
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 |
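# The tester above fabricates random token batches via ids_tensor /
# random_attention_mask; a tiny numpy equivalent of those helpers (the names
# mirror the imports, the implementation is my own sketch):
import numpy as np

def ids_tensor(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int32)

def random_attention_mask(shape, seed=0):
    mask = ids_tensor(shape, vocab_size=2, seed=seed)
    mask[:, -1] = 1  # guarantee at least one attended position per row
    return mask

print(ids_tensor((2, 5), vocab_size=99))
print(random_attention_mask((2, 5)))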
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
| 73 | 1 |
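# The class above is stamped out once per configuration by
# parameterized_class; the mechanism in miniature, using the same
# `parameterized` package the test imports:
import unittest
from parameterized import parameterized_class

@parameterized_class([
    {'base': 2, 'expected': 8},
    {'base': 3, 'expected': 27},
])
class PowerTest(unittest.TestCase):
    def test_cube(self):
        self.assertEqual(self.base ** 3, self.expected)

if __name__ == '__main__':
    unittest.main()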
import socket
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
__lowerCamelCase : Tuple = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__lowerCamelCase : Tuple = socket.gethostname()
__lowerCamelCase : Union[str, Any] = 1_2_3_1_2
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
__lowerCamelCase : Dict = sock.recv(1_0_2_4 )
if not data:
break
out_file.write(lowerCamelCase__ )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 73 |
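# The client above assumes a peer that accepts one connection and streams a
# file back in 1024-byte chunks; a matching minimal server sketch (same
# host/port convention; the file path is a placeholder):
import socket

def serve_file(path: str, port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((socket.gethostname(), port))
        sock.listen(1)
        conn, addr = sock.accept()
        with conn:
            print(f'connected: {addr}')
            conn.recv(1024)  # consume the client's greeting
            with open(path, 'rb') as f:
                while chunk := f.read(1024):
                    conn.sendall(chunk)

# serve_file('File_to_send')  # run before starting the client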
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : str = KandinskyVaaInpaintPipeline
_UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_UpperCAmelCase : List[Any] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_UpperCAmelCase : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : str = False
@property
def lowerCAmelCase ( self : Tuple):
return 3_2
@property
def lowerCAmelCase ( self : Any):
return 3_2
@property
def lowerCAmelCase ( self : Optional[Any]):
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str]):
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : int):
return 1_0_0
@property
def lowerCAmelCase ( self : str):
torch.manual_seed(0)
__lowerCamelCase : List[str] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : Dict = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
return model
@property
def lowerCAmelCase ( self : int):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : List[Any]):
torch.manual_seed(0)
__lowerCamelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCAmelCase ( self : Any):
__lowerCamelCase : str = self.dummy_unet
__lowerCamelCase : Optional[Any] = self.dummy_movq
__lowerCamelCase : Optional[int] = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=0):
__lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
SCREAMING_SNAKE_CASE__)
# create init_image
__lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = image.cpu().permute(0 ,2 ,3 ,1)[0]
__lowerCamelCase : str = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__)).convert('RGB').resize((2_5_6, 2_5_6))
# create mask
__lowerCamelCase : str = np.ones((6_4, 6_4) ,dtype=np.floataa)
__lowerCamelCase : Optional[Any] = 0
if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
__lowerCamelCase : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : int = 'cpu'
__lowerCamelCase : Union[str, Any] = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Any = output.images
__lowerCamelCase : str = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}")
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Dict = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def lowerCAmelCase ( self : Any):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy')
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
__lowerCamelCase : List[str] = np.ones((7_6_8, 7_6_8) ,dtype=np.floataa)
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : Optional[int] = 'a hat'
__lowerCamelCase : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
pipe_prior.to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' ,torch_dtype=torch.floataa)
__lowerCamelCase : Tuple = pipeline.to(SCREAMING_SNAKE_CASE__)
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = torch.Generator(device='cpu').manual_seed(0)
__lowerCamelCase , __lowerCamelCase : List[Any] = pipe_prior(
SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
__lowerCamelCase : str = pipeline(
image=SCREAMING_SNAKE_CASE__ ,mask_image=SCREAMING_SNAKE_CASE__ ,image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type='np' ,)
__lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
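# For reference (an illustrative summary of the integration test above):
# Kandinsky 2.2 inpainting runs in two stages. The prior pipeline
# ('kandinsky-2-2-prior') maps the text prompt to image embeddings, and the
# decoder pipeline ('kandinsky-2-2-decoder-inpaint') consumes those embeddings
# together with the source image and mask:
#     image_emb, neg_emb = pipe_prior(prompt, ...).to_tuple()
#     result = pipeline(image=init_image, mask_image=mask,
#                       image_embeds=image_emb, negative_image_embeds=neg_emb, ...)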
| 73 |
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
__lowerCamelCase : Tuple = tweepy.OAuthHandler(lowerCamelCase__ , lowerCamelCase__ )
auth.set_access_token(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = tweepy.API(lowerCamelCase__ )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCamelCase__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowerCamelCase : str = api.user_timeline(
screen_name=lowerCamelCase__ , count=2_0_0 , max_id=lowerCamelCase__ )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
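# The max_id pagination used above in miniature (an illustrative sketch;
# fetch_page is a stand-in for api.user_timeline, not a real tweepy call):
# each request asks only for items strictly older than the oldest one seen so
# far, so consecutive pages never overlap.
def _paginate(fetch_page, page_size=200):
    items = fetch_page(max_id=None, count=page_size)
    all_items = list(items)
    while items:
        oldest = all_items[-1]["id"] - 1  # everything strictly older than this
        items = fetch_page(max_id=oldest, count=page_size)
        all_items.extend(items)
    return all_items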
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73 | 1 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[int] = ConsistencyModelPipeline
_UpperCAmelCase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase : Tuple = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_UpperCAmelCase : List[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' ,subfolder='test_unet' ,)
return unet
@property
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : str = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' ,subfolder='test_unet_class_cond' ,)
return unet
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int=False):
if class_cond:
__lowerCamelCase : Optional[Any] = self.dummy_cond_unet
else:
__lowerCamelCase : List[str] = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple=0):
if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
__lowerCamelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [2_2, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = None
__lowerCamelCase : Any = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Union[str, Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[Any]):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Any=0 ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Optional[Any]="cpu" ,SCREAMING_SNAKE_CASE__ : List[str]=torch.floataa ,SCREAMING_SNAKE_CASE__ : Optional[int]=(1, 3, 6_4, 6_4)):
__lowerCamelCase : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = {
'num_inference_steps': None,
'timesteps': [2_2, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : Union[str, Any] = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ ,shape=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = latents
return inputs
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int=0 ,SCREAMING_SNAKE_CASE__ : Dict="cpu" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=torch.floataa ,SCREAMING_SNAKE_CASE__ : Optional[Any]=(1, 3, 6_4, 6_4)):
if type(SCREAMING_SNAKE_CASE__) == str:
__lowerCamelCase : List[Any] = torch.device(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = randn_tensor(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__)
return latents
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : int = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = self.get_inputs()
__lowerCamelCase : int = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : List[str] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def lowerCAmelCase ( self : str):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = self.get_inputs()
__lowerCamelCase : Any = 1
__lowerCamelCase : Any = None
__lowerCamelCase : Tuple = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : int = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = 1
__lowerCamelCase : Union[str, Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Dict = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
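# For reference (an illustrative summary of the tests above): the two
# sampling modes differ only in the timestep arguments handed to the pipeline -
#     multistep: num_inference_steps=None, timesteps=[22, 0]
#     one-step:  num_inference_steps=1,    timesteps=None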
| 73 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute the IndicGLUE evaluation metric associated with each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Any = np.array(lowerCamelCase__ )
__lowerCamelCase : List[Any] = np.array(lowerCamelCase__ )
__lowerCamelCase : Any = en_sentvecs.shape[0]
# mean centering
__lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , lowerCamelCase__ , 'cosine' )
__lowerCamelCase : Optional[Any] = np.array(range(lowerCamelCase__ ) )
__lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0]
__lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
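# A self-contained restatement of the retrieval logic above with explicit
# names (illustrative sketch; the anonymised helper is the one the metric
# actually uses). Both sides are mean-centred, each query is ranked against
# all candidates by cosine distance, and a hit means the gold index appears
# among the 10 nearest candidates.
def _precision_at_10_sketch(en_sentvecs, in_sentvecs):
    en = en_sentvecs - np.mean(en_sentvecs, axis=0)
    ins = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en, ins, 'cosine')  # pairwise cosine distances
    preds = sim.argsort(axis=1)[:, :10]  # 10 nearest candidates per query
    actual = np.array(range(en.shape[0]))  # gold match for row i is column i
    return float(np.any(preds == actual[:, None], axis=1).mean())

# With identical vectors on both sides, every query's nearest neighbour is
# itself, so precision@10 must be perfect.
assert _precision_at_10_sketch(np.eye(12), np.eye(12)) == 1.0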
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowerCAmelCase ( self : Optional[Any]):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 73 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Tuple = '''poolformer'''
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Any=1_6 ,SCREAMING_SNAKE_CASE__ : Tuple=1_6 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4.0 ,SCREAMING_SNAKE_CASE__ : int=[2, 2, 6, 2] ,SCREAMING_SNAKE_CASE__ : Dict=[6_4, 1_2_8, 3_2_0, 5_1_2] ,SCREAMING_SNAKE_CASE__ : Tuple=[7, 3, 3, 3] ,SCREAMING_SNAKE_CASE__ : int=[4, 2, 2, 2] ,SCREAMING_SNAKE_CASE__ : int=[2, 1, 1, 1] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : str=1E-5 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.02 ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
__lowerCamelCase : str = num_channels
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Optional[int] = stride
__lowerCamelCase : List[str] = padding
__lowerCamelCase : List[str] = pool_size
__lowerCamelCase : str = hidden_sizes
__lowerCamelCase : List[Any] = mlp_ratio
__lowerCamelCase : Optional[int] = depths
__lowerCamelCase : Optional[int] = patch_sizes
__lowerCamelCase : Union[str, Any] = strides
__lowerCamelCase : Any = num_encoder_blocks
__lowerCamelCase : Tuple = drop_path_rate
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : List[Any] = use_layer_scale
__lowerCamelCase : List[str] = layer_scale_init_value
__lowerCamelCase : str = initializer_range
super().__init__(**SCREAMING_SNAKE_CASE__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[str] = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : Dict):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCAmelCase ( self : Optional[Any]):
return 2E-3
| 73 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : list[tuple[float, float]]):
__lowerCamelCase : Union[str, Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__) - 1
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree ,SCREAMING_SNAKE_CASE__) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(SCREAMING_SNAKE_CASE__) ,5) == 1
return output_values
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : Tuple = self.basis_function(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : float = 0.01):
from matplotlib import pyplot as plt # type: ignore
__lowerCamelCase : list[float] = [] # x coordinates of points to plot
__lowerCamelCase : list[float] = [] # y coordinates of points to plot
__lowerCamelCase : Any = 0.0
while t <= 1:
__lowerCamelCase : List[Any] = self.bezier_curve_function(SCREAMING_SNAKE_CASE__)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
__lowerCamelCase : Optional[Any] = [i[0] for i in self.list_of_points]
__lowerCamelCase : List[str] = [i[1] for i in self.list_of_points]
plt.plot(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='blue' ,label='Curve of Degree ' + str(self.degree) ,)
plt.scatter(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='red' ,label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
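# A numeric spot check of the Bernstein-basis evaluation above, written with
# explicit names (illustrative sketch, independent of the class): a degree-2
# curve through (0, 0), (1, 2), (2, 0) evaluated at t = 0.5 must give the
# point (1.0, 1.0).
def _bezier_point(points: list[tuple[float, float]], t: float) -> tuple[float, float]:
    n = len(points) - 1
    # Bernstein basis: comb(n, i) * (1 - t)^(n - i) * t^i
    basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    x = sum(b * p[0] for b, p in zip(basis, points))
    y = sum(b * p[1] for b, p in zip(basis, points))
    return (x, y)

assert _bezier_point([(0, 0), (1, 2), (2, 0)], 0.5) == (1.0, 1.0)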
| 73 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = ['''pixel_values''']
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD ,**SCREAMING_SNAKE_CASE__ : int ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCamelCase : str = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Any = do_resize
__lowerCamelCase : Optional[Any] = size
__lowerCamelCase : List[Any] = resample
__lowerCamelCase : Optional[int] = do_center_crop
__lowerCamelCase : Optional[int] = crop_size
__lowerCamelCase : List[str] = do_rescale
__lowerCamelCase : Optional[Any] = rescale_factor
__lowerCamelCase : Dict = do_normalize
__lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCamelCase : List[str] = int((2_5_6 / 2_2_4) * size['shortest_edge'])
__lowerCamelCase : Union[str, Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
return resize(
SCREAMING_SNAKE_CASE__ ,size=(size_dict['height'], size_dict['width']) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
__lowerCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[float] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
__lowerCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : Tuple = resample if resample is not None else self.resample
__lowerCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
__lowerCamelCase : List[Any] = size if size is not None else self.size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Optional[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__lowerCamelCase : str = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
__lowerCamelCase : Any = [self.resize(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_center_crop:
__lowerCamelCase : Union[str, Any] = [self.center_crop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
__lowerCamelCase : Any = [self.rescale(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
__lowerCamelCase : List[Any] = [self.normalize(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
| 73 |
from __future__ import annotations
import time
a =list[tuple[int, int]]
a =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a =[[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : Tuple = pos_x
__lowerCamelCase : List[str] = pos_y
__lowerCamelCase : str = (pos_y, pos_x)
__lowerCamelCase : str = goal_x
__lowerCamelCase : int = goal_y
__lowerCamelCase : List[Any] = parent
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tuple[int, int] ,SCREAMING_SNAKE_CASE__ : tuple[int, int]):
__lowerCamelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = [self.start]
__lowerCamelCase : List[str] = False
def lowerCAmelCase ( self : List[Any]):
while self.node_queue:
__lowerCamelCase : Any = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
__lowerCamelCase : Dict = True
return self.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_successors(SCREAMING_SNAKE_CASE__)
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : Union[str, Any] = []
for action in delta:
__lowerCamelCase : Optional[Any] = parent.pos_x + action[1]
__lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.target.pos_y ,self.target.pos_x ,SCREAMING_SNAKE_CASE__))
return successors
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : List[Any] = node
__lowerCamelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCamelCase : int = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : int = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = False
def lowerCAmelCase ( self : str):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCamelCase : Any = self.fwd_bfs.node_queue.pop(0)
__lowerCamelCase : Any = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase : List[str] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = current_bwd_node
__lowerCamelCase : int = current_fwd_node
__lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Node ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a =(0, 0)
a =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a =time.time()
a =BreadthFirstSearch(init, goal)
a =bfs.search()
a =time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
a =time.time()
a =BidirectionalBreadthFirstSearch(init, goal)
a =bd_bfs.search()
a =time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) <= 1:
return [tuple(lowerCamelCase__ )]
__lowerCamelCase : str = []
def generate(n , arr ):
__lowerCamelCase : List[Any] = [0] * n
res.append(tuple(lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
__lowerCamelCase , __lowerCamelCase : str = arr[i], arr[0]
else:
__lowerCamelCase , __lowerCamelCase : Dict = arr[i], arr[c[i]]
res.append(tuple(lowerCamelCase__ ) )
c[i] += 1
__lowerCamelCase : Any = 0
else:
__lowerCamelCase : Tuple = 0
i += 1
generate(len(lowerCamelCase__ ) , lowerCamelCase__ )
return res
if __name__ == "__main__":
a =input("""Enter numbers separated by a comma:\n""").strip()
a =[int(item) for item in user_input.split(""",""")]
print(heaps(arr))
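# A property check for Heap's algorithm, restated with explicit names
# (illustrative sketch; the definition above is anonymised): on n distinct
# items it must emit exactly the n! permutations, each once.
from itertools import permutations

def _heaps_sketch(arr: list) -> list:
    arr = list(arr)
    n = len(arr)
    c = [0] * n  # per-position swap counters, as in the iterative algorithm
    res = [tuple(arr)]
    i = 0
    while i < n:
        if c[i] < i:
            j = 0 if i % 2 == 0 else c[i]  # swap partner per Heap's rule
            arr[j], arr[i] = arr[i], arr[j]
            res.append(tuple(arr))
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
    return res

assert sorted(_heaps_sketch([1, 2, 3])) == sorted(permutations([1, 2, 3]))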
| 73 |
import qiskit
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> qiskit.result.counts.Counts:
__lowerCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
__lowerCamelCase : List[str] = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowerCamelCase : List[Any] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: # noqa: E741
while r - l > 1:
__lowerCamelCase : int = (l + r) // 2
if v[m] >= key:
__lowerCamelCase : str = m
else:
__lowerCamelCase : Tuple = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
if len(lowerCamelCase__ ) == 0:
return 0
__lowerCamelCase : Union[str, Any] = [0] * len(lowerCamelCase__ )
__lowerCamelCase : Optional[int] = 1
__lowerCamelCase : str = v[0]
for i in range(1 , len(lowerCamelCase__ ) ):
if v[i] < tail[0]:
__lowerCamelCase : Optional[Any] = v[i]
elif v[i] > tail[length - 1]:
__lowerCamelCase : List[str] = v[i]
length += 1
else:
__lowerCamelCase : str = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
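# The same patience-sorting idea with explicit names (illustrative sketch):
# tails[k] holds the smallest possible tail of a strictly increasing
# subsequence of length k + 1, so each element costs one O(log n) bisection.
from bisect import bisect_left

def _lis_length(v: list[int]) -> int:
    tails: list[int] = []
    for x in v:
        pos = bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)  # x extends the longest subsequence seen so far
        else:
            tails[pos] = x  # x lowers an existing tail, enabling longer runs
    return len(tails)

assert _lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6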
| 73 |
import os
import sys
a =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return AutoConfig.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModel.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
| 73 | 1 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCamelCase : int = quote(lowerCamelCase__ )
return hfh.hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' , revision=lowerCamelCase__ )
| 73 | 1 |
from __future__ import annotations
from random import choice
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
return choice(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Any = random_pivot(lowerCamelCase__ )
# partition based on pivot
# linear time
__lowerCamelCase : int = [e for e in lst if e < pivot]
__lowerCamelCase : List[Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCamelCase__ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCamelCase__ ) < k - 1:
return kth_number(lowerCamelCase__ , k - len(lowerCamelCase__ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
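# The quickselect recursion above with explicit names (illustrative sketch;
# like the original, it assumes distinct elements): partitioning around a
# random pivot discards one side per step, for expected linear time.
def _kth_smallest(lst: list[int], k: int) -> int:
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot  # exactly k - 1 elements are smaller, so pivot is k-th
    if len(small) < k - 1:
        # pivot and `small` account for len(small) + 1 ranks; recurse right
        return _kth_smallest(big, k - len(small) - 1)
    return _kth_smallest(small, k)

assert _kth_smallest([7, 2, 9, 4, 1], 3) == 4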
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> float:
__lowerCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__lowerCamelCase : Dict = 1 - (matter_density + radiation_density + dark_energy)
__lowerCamelCase : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowerCamelCase : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a =0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
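# A sanity check of the Friedmann relation above, restated with explicit
# names (illustrative sketch): H(z) = H0 * E(z) with
# E(z)^2 = Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL. At z = 0 the four
# densities sum to one by construction, so E(0) = 1 and H(0) = H0.
def _hubble_sketch(h0, o_r, o_m, o_l, z):
    o_k = 1 - (o_m + o_r + o_l)  # curvature closes the density budget
    e_squared = o_r * (z + 1) ** 4 + o_m * (z + 1) ** 3 + o_k * (z + 1) ** 2 + o_l
    return h0 * e_squared ** 0.5

assert abs(_hubble_sketch(68.3, 1E-4, 0.3, 1 - 0.3, 0) - 68.3) < 1E-9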
| 73 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Tuple = '''vision-encoder-decoder'''
_UpperCAmelCase : Union[str, Any] = True
def __init__( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
super().__init__(**SCREAMING_SNAKE_CASE__)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
__lowerCamelCase : Optional[int] = kwargs.pop('encoder')
__lowerCamelCase : List[str] = encoder_config.pop('model_type')
__lowerCamelCase : Union[str, Any] = kwargs.pop('decoder')
__lowerCamelCase : Optional[Any] = decoder_config.pop('model_type')
__lowerCamelCase : str = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = True
@classmethod
def lowerCAmelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,**SCREAMING_SNAKE_CASE__ : List[str]):
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
__lowerCamelCase : List[str] = True
__lowerCamelCase : Tuple = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : str = copy.deepcopy(self.__dict__)
__lowerCamelCase : Optional[Any] = self.encoder.to_dict()
__lowerCamelCase : Optional[Any] = self.decoder.to_dict()
__lowerCamelCase : Tuple = self.__class__.model_type
return output
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : Dict):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCAmelCase ( self : Tuple):
return 1E-4
@property
def lowerCAmelCase ( self : List[Any]):
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class A_ ( SCREAMING_SNAKE_CASE ):
@property
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = OrderedDict()
__lowerCamelCase : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__lowerCamelCase : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__lowerCamelCase : Tuple = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None ,):
import torch
__lowerCamelCase : int = OrderedDict()
__lowerCamelCase : Any = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Any = dummy_input['input_ids'].shape
__lowerCamelCase : Optional[Any] = (batch, encoder_sequence, self._config.encoder_hidden_size)
__lowerCamelCase : str = dummy_input.pop('input_ids')
__lowerCamelCase : Any = dummy_input.pop('attention_mask')
__lowerCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE__)
return common_inputs
class A_ ( SCREAMING_SNAKE_CASE ):
@property
def lowerCAmelCase ( self : Any):
pass
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : PretrainedConfig):
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,SCREAMING_SNAKE_CASE__ : str = "default"):
__lowerCamelCase : Optional[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
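# Typical construction of this composite config (illustrative; in the
# transformers library the classmethod above is exposed as
# from_encoder_decoder_configs):
#     from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#         ViTConfig(), BertConfig())
# As the classmethod logs, the decoder config is flipped to is_decoder=True
# with cross-attention enabled.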
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
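# Typical usage of this processor (illustrative sketch; the checkpoint name
# is an assumption):
#     from transformers import Pix2StructProcessor
#     processor = Pix2StructProcessor.from_pretrained('google/pix2struct-base')
#     inputs = processor(images=image, text='a caption', return_tensors='pt')
# For non-VQA checkpoints the text goes through the tokenizer branch above;
# for VQA checkpoints it is rendered into the image as header text instead.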
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
a ={
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 73 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
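"""Greedy fractional knapsack: return the maximum value attainable with
capacity ``w`` from the ``n`` items with values ``vl`` and weights ``wt``.

>>> SCREAMING_SNAKE_CASE__([60, 100, 120], [10, 20, 30], 50, 3)
240.0
"""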
__lowerCamelCase : Optional[Any] = sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda lowerCamelCase__ : x[0] / x[1] , reverse=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : List[str] = list(accumulate(lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
_UpperCAmelCase : Optional[int] = field(
default=10_000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
_UpperCAmelCase : Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate for training.'''} )
_UpperCAmelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
_UpperCAmelCase : Optional[int] = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
_UpperCAmelCase : Optional[int] = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
_UpperCAmelCase : Optional[int] = field(default=50_000 , metadata={'''help''': '''Maximum number of training steps.'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
_UpperCAmelCase : Optional[int] = field(
default=1_024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1_024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
_UpperCAmelCase : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
_UpperCAmelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
_UpperCAmelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
_UpperCAmelCase : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
_UpperCAmelCase : Optional[int] = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
_UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''eval_results.json''' , metadata={'''help''': '''File to save the evaluation results to.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
_UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class A_ :
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
_UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset to.'''} )
_UpperCAmelCase : Optional[int] = field(
default=100_000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
_UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase : Optional[float] = field(
default=1_000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
_UpperCAmelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
_UpperCAmelCase : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
_UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase : Optional[int] = field(default=200_000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
_UpperCAmelCase : Optional[int] = field(
default=32_768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
_UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
_UpperCAmelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
_UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
_UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
_UpperCAmelCase : Optional[bool] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Push the initialized model to the hub.'''} )
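# Illustrative usage sketch: dataclasses like the ones above are typically parsed
# from the command line with transformers.HfArgumentParser. The names
# `TrainingArguments` and `model_ckpt` below are assumptions, not spelled out in
# this file.
#
# from transformers import HfArgumentParser
# parser = HfArgumentParser(TrainingArguments)
# (train_args,) = parser.parse_args_into_dataclasses()
# print(train_args.model_ckpt)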
| 73 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
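# Strassen's trick: the seven recursive half-size products below replace the
# eight products of the naive block multiplication.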
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
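# Wrapper around actual_strassen: validate the shapes, pad both matrices with
# zeros up to the next power-of-two square so the even-split recursion always
# applies, multiply, then trim the padding off the result.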
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
# Pad the matrices with zeros so that both are square, share the same dimensions,
# and those dimensions are a power of 2 (required by the even-split recursion)
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0**9 ) -> int:
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Tuple = 2
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowerCamelCase : Optional[int] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
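# Reflect the incoming ray about the normal of the ellipse 4x^2 + y^2 = 100 at
# (point_x, point_y): the normal has gradient y/(4x), and (ca, sa) encode the
# cosine/sine of twice the normal's inclination, used to reflect the gradient.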
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# To find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a ="""▁"""
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
# The mask token behaves like a normal word, i.e. it includes the space before it
# and is included in the raw text, so there should be a match in a non-normalized sentence.
__lowerCamelCase : Dict = (
AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__ ,normalized=SCREAMING_SNAKE_CASE__)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else mask_token
)
__lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = do_lower_case
__lowerCamelCase : Union[str, Any] = remove_space
__lowerCamelCase : Tuple = keep_accents
__lowerCamelCase : Dict = vocab_file
__lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : Optional[Any]):
return len(self.sp_model)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Tuple = None
return state
def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
if self.remove_space:
__lowerCamelCase : Dict = ' '.join(inputs.strip().split())
else:
__lowerCamelCase : Optional[Any] = inputs
__lowerCamelCase : Tuple = outputs.replace('``' ,'"').replace('\'\'' ,'"')
if not self.keep_accents:
__lowerCamelCase : List[str] = unicodedata.normalize('NFKD' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = ''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__)])
if self.do_lower_case:
__lowerCamelCase : Optional[Any] = outputs.lower()
return outputs
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = []
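# SentencePiece sometimes glues a trailing comma onto a number (e.g. "9,");
# split the comma back off so it becomes its own piece.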
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__lowerCamelCase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
__lowerCamelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(SCREAMING_SNAKE_CASE__)
else:
new_pieces.append(SCREAMING_SNAKE_CASE__)
return new_pieces
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Any):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : int = ''
__lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Any = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Tuple = [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
| 73 | 1 |
import string
# letter frequencies taken from https://en.wikipedia.org/wiki/Letter_frequency
a ={
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> dict[str, int]:
__lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return x[0]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
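# Rank the letters of the message from most to least frequent; ties are broken
# by reverse ETAOIN order so the ordering is deterministic.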
__lowerCamelCase : List[str] = get_letter_count(lowerCamelCase__ )
__lowerCamelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCamelCase__ )
__lowerCamelCase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = ''.join(freq_to_letter[freq] )
__lowerCamelCase : int = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCamelCase__ , reverse=lowerCamelCase__ )
__lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
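# Score how English-like the text is: one point for each of the six most and six
# least frequent letters that also appear in English's top/bottom six (max 12).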
__lowerCamelCase : str = get_frequency_order(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
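"""Net present value of the cash flows at the given discount rate; index 0 is
the time-zero flow (typically the initial outlay, entered as a negative).

>>> SCREAMING_SNAKE_CASE__(0.1, [-1000, 500, 500, 500])
243.43
"""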
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__lowerCamelCase : int = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a =logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt"""}
a ={
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
a ={
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
a ={
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[str]="[UNK]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[PAD]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="[CLS]" ,SCREAMING_SNAKE_CASE__ : Tuple="[MASK]" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
super().__init__(
SCREAMING_SNAKE_CASE__ ,tokenizer_file=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ ,strip_accents=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' ,SCREAMING_SNAKE_CASE__) != do_lower_case
or normalizer_state.get('strip_accents' ,SCREAMING_SNAKE_CASE__) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,SCREAMING_SNAKE_CASE__) != tokenize_chinese_chars
):
__lowerCamelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ ,normalizer_state.pop('type'))
__lowerCamelCase : Optional[int] = do_lower_case
__lowerCamelCase : List[str] = strip_accents
__lowerCamelCase : Optional[int] = tokenize_chinese_chars
__lowerCamelCase : List[Any] = normalizer_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = do_lower_case
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=None):
__lowerCamelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : str = [self.sep_token_id]
__lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
__lowerCamelCase : Any = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ ,name=SCREAMING_SNAKE_CASE__)
return tuple(SCREAMING_SNAKE_CASE__)
| 73 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = '''mask2former'''
_UpperCAmelCase : Dict = ['''swin''']
_UpperCAmelCase : Optional[int] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Dict] = None ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ,SCREAMING_SNAKE_CASE__ : str = "relu" ,SCREAMING_SNAKE_CASE__ : int = 6 ,SCREAMING_SNAKE_CASE__ : int = 1_0 ,SCREAMING_SNAKE_CASE__ : int = 8 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : int = 4 ,SCREAMING_SNAKE_CASE__ : int = 2_5_5 ,SCREAMING_SNAKE_CASE__ : int = 1_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 2.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 ,SCREAMING_SNAKE_CASE__ : float = 3.0 ,SCREAMING_SNAKE_CASE__ : float = 0.75 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : float = 1.0 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] ,SCREAMING_SNAKE_CASE__ : bool = None ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
__lowerCamelCase : Optional[Any] = CONFIG_MAPPING['swin'](
image_size=2_2_4 ,in_channels=3 ,patch_size=4 ,embed_dim=9_6 ,depths=[2, 2, 1_8, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=SCREAMING_SNAKE_CASE__ ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Union[str, Any] = backbone_config.pop('model_type')
__lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported)}")
__lowerCamelCase : Dict = backbone_config
__lowerCamelCase : int = feature_size
__lowerCamelCase : List[str] = mask_feature_size
__lowerCamelCase : int = hidden_dim
__lowerCamelCase : str = encoder_feedforward_dim
__lowerCamelCase : Optional[int] = activation_function
__lowerCamelCase : int = encoder_layers
__lowerCamelCase : List[Any] = decoder_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Tuple = dropout
__lowerCamelCase : Dict = dim_feedforward
__lowerCamelCase : Union[str, Any] = pre_norm
__lowerCamelCase : List[str] = enforce_input_projection
__lowerCamelCase : Optional[int] = common_stride
__lowerCamelCase : Dict = ignore_value
__lowerCamelCase : Optional[Any] = num_queries
__lowerCamelCase : int = no_object_weight
__lowerCamelCase : Optional[Any] = class_weight
__lowerCamelCase : str = mask_weight
__lowerCamelCase : List[str] = dice_weight
__lowerCamelCase : Dict = train_num_points
__lowerCamelCase : Optional[int] = oversample_ratio
__lowerCamelCase : Optional[Any] = importance_sample_ratio
__lowerCamelCase : List[Any] = init_std
__lowerCamelCase : Tuple = init_xavier_std
__lowerCamelCase : Union[str, Any] = use_auxiliary_loss
__lowerCamelCase : List[Any] = feature_strides
__lowerCamelCase : Any = output_auxiliary_logits
__lowerCamelCase : List[Any] = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__)
@classmethod
def lowerCAmelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,**SCREAMING_SNAKE_CASE__ : Tuple):
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__)
__lowerCamelCase : List[Any] = self.backbone_config.to_dict()
__lowerCamelCase : Union[str, Any] = self.__class__.model_type
return output
| 73 | 1 |
from math import factorial, radians
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = 1_8 , lowerCamelCase__ = 1_0 ) -> float:
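# Truncated Maclaurin series for sin(x): x - x^3/3! + x^5/5! - ...; the angle is
# reduced mod 360 degrees, converted to radians, and the result rounded to the
# requested number of digits (third argument).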
__lowerCamelCase : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowerCamelCase : Any = radians(lowerCamelCase__ )
__lowerCamelCase : List[Any] = angle_in_radians
__lowerCamelCase : Dict = 3
__lowerCamelCase : List[Any] = -1
for _ in range(lowerCamelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCamelCase__ )
__lowerCamelCase : List[Any] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 73 |
import string
# letter frequencies taken from https://en.wikipedia.org/wiki/Letter_frequency
a ={
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> dict[str, int]:
__lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return x[0]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
__lowerCamelCase : List[str] = get_letter_count(lowerCamelCase__ )
__lowerCamelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowerCamelCase__ )
__lowerCamelCase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = ''.join(freq_to_letter[freq] )
__lowerCamelCase : int = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowerCamelCase__ , reverse=lowerCamelCase__ )
__lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : str = get_frequency_order(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
return base * power(lowerCamelCase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
a =int(input("""Enter the base: """).strip())
a =int(input("""Enter the exponent: """).strip())
a =power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
a =1 / result
print(F"""{base} to the power of {exponent} is {result}""")
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) either connects a vertex from U to V
# or a vertex from V to U. In other words, for every edge (u, v), either u belongs
# to U and v to V, or u belongs to V and v to U. Equivalently, no edge connects
# two vertices of the same set.
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
__lowerCamelCase : Any = [False] * len(lowerCamelCase__ )
__lowerCamelCase : int = [-1] * len(lowerCamelCase__ )
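# Try to 2-color the graph with DFS; it is bipartite iff no edge ends up joining
# two vertices of the same color.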
def dfs(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : List[Any] = True
__lowerCamelCase : List[str] = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCamelCase__ , 1 - c )
for i in range(len(lowerCamelCase__ ) ):
if not visited[i]:
dfs(lowerCamelCase__ , 0 )
for i in range(len(lowerCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
a ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 73 |
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
for i in range(0 , lowerCamelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
for i in range(lowerCamelCase__ , 0 , -1 ):
for _ in range(lowerCamelCase__ , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCamelCase__ ) # upper half
reverse_floyd(lowerCamelCase__ ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
a =1
while K:
a =int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a =int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 73 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a =logging.get_logger(__name__)
a ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
a =[
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
for attribute in key.split('.' ):
__lowerCamelCase : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
__lowerCamelCase : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
__lowerCamelCase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCamelCase : int = value
elif weight_type == "weight_g":
__lowerCamelCase : str = value
elif weight_type == "weight_v":
__lowerCamelCase : Dict = value
elif weight_type == "bias":
__lowerCamelCase : int = value
else:
__lowerCamelCase : Dict = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : List[Any] = []
__lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
__lowerCamelCase : Optional[int] = hf_model.feature_extractor
# if the encoder dim differs from the decoder dim -> use proj_weight
__lowerCamelCase : Optional[Any] = None
for name, value in fairseq_dict.items():
__lowerCamelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase : Tuple = True
elif name.split('.' )[0] == "proj":
__lowerCamelCase : Tuple = fairseq_model.proj
__lowerCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase : Any = True
if "*" in mapped_key:
__lowerCamelCase : Tuple = name.split(lowerCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase : Optional[Any] = mapped_key.replace('*' , lowerCamelCase__ )
if "weight_g" in name:
__lowerCamelCase : int = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase : List[Any] = 'weight_v'
elif "bias" in name:
__lowerCamelCase : List[str] = 'bias'
elif "weight" in name:
__lowerCamelCase : List[Any] = 'weight'
else:
__lowerCamelCase : Tuple = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Dict = full_name.split('conv_layers.' )[-1]
__lowerCamelCase : int = name.split('.' )
__lowerCamelCase : Tuple = int(items[0] )
__lowerCamelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCamelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCamelCase : Optional[int] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCamelCase : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCamelCase : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
__lowerCamelCase , __lowerCamelCase : List[Any] = emb.weight.shape
__lowerCamelCase : Optional[Any] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
__lowerCamelCase : Tuple = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase : str = f.readlines()
__lowerCamelCase : int = [line.split(' ' )[0] for line in lines]
__lowerCamelCase : List[Any] = len(lowerCamelCase__ )
__lowerCamelCase : List[str] = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(lowerCamelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> str:
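# Overall flow: load the fairseq checkpoint, copy the encoder weights into a HF
# Wav2Vec2 model, load the decoder weights into a Speech2Text2 causal LM, wire
# both into a SpeechEncoderDecoderModel, then save the model, the tokenizer
# vocabulary, and the feature extractor.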
__lowerCamelCase : Optional[int] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = SpeechaTextaConfig.from_pretrained(
lowerCamelCase__ , vocab_size=lowerCamelCase__ , decoder_layers=lowerCamelCase__ , do_stable_layer_norm=lowerCamelCase__ )
__lowerCamelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowerCamelCase : Dict = model[0].eval()
# set weights for wav2vec2 encoder
__lowerCamelCase : Optional[int] = WavaVecaModel(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , lowerCamelCase__ )
__lowerCamelCase : List[str] = SpeechaTextaForCausalLM(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowerCamelCase : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# the layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
__lowerCamelCase : Union[str, Any] = SpeechEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = False
# add projection layer
__lowerCamelCase : Any = nn.Parameter(projection_layer.weight )
__lowerCamelCase : Tuple = nn.Parameter(projection_layer.bias )
__lowerCamelCase : List[Any] = create_vocab_dict(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = SpeechaTextaTokenizer(os.path.join(lowerCamelCase__ , 'vocab.json' ) )
tokenizer.save_pretrained(lowerCamelCase__ )
__lowerCamelCase : str = hf_wavavec.config.to_dict()
__lowerCamelCase : str = tokenizer.pad_token_id
__lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
__lowerCamelCase : Dict = tokenizer.eos_token_id
__lowerCamelCase : Dict = 'speech_to_text_2'
__lowerCamelCase : Tuple = 'wav2vec2'
__lowerCamelCase : List[str] = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
feature_extractor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
a =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 73 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : List[Any] = '''AutoImageProcessor'''
_UpperCAmelCase : Dict = '''AutoTokenizer'''
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Union[str, Any] = kwargs.pop('feature_extractor')
__lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.image_processor
__lowerCamelCase : Optional[int] = False
def __call__( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = kwargs.pop('images' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = kwargs.pop('text' ,SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__lowerCamelCase : int = args[0]
__lowerCamelCase : List[str] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
if images is not None:
__lowerCamelCase : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None:
__lowerCamelCase : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : Optional[Any] = encodings['input_ids']
return inputs
def lowerCAmelCase ( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Any):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@contextmanager
def lowerCAmelCase ( self : Tuple):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call).')
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = self.tokenizer
yield
__lowerCamelCase : Tuple = self.image_processor
__lowerCamelCase : Tuple = False
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : List[Any]=None):
if added_vocab is None:
__lowerCamelCase : str = self.tokenizer.get_added_vocab()
__lowerCamelCase : Union[str, Any] = {}
while tokens:
__lowerCamelCase : Tuple = re.search(R'<s_(.*?)>' ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if start_token is None:
break
__lowerCamelCase : Dict = start_token.group(1)
__lowerCamelCase : List[str] = re.search(RF"</s_{key}>" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
__lowerCamelCase : Optional[int] = start_token.group()
if end_token is None:
__lowerCamelCase : List[Any] = tokens.replace(SCREAMING_SNAKE_CASE__ ,'')
else:
__lowerCamelCase : Tuple = end_token.group()
__lowerCamelCase : int = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = re.search(F"{start_token_escaped}(.*?){end_token_escaped}" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if content is not None:
__lowerCamelCase : List[Any] = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCamelCase : str = self.tokenajson(SCREAMING_SNAKE_CASE__ ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if value:
if len(SCREAMING_SNAKE_CASE__) == 1:
__lowerCamelCase : Tuple = value[0]
__lowerCamelCase : int = value
else: # leaf nodes
__lowerCamelCase : Tuple = []
for leaf in content.split(R'<sep/>'):
__lowerCamelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCamelCase : str = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE__)
if len(output[key]) == 1:
__lowerCamelCase : Dict = output[key][0]
__lowerCamelCase : Dict = tokens[tokens.find(SCREAMING_SNAKE_CASE__) + len(SCREAMING_SNAKE_CASE__) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
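    # Illustrative traces of the parser above (hypothetical token streams, added for clarity):
    #   "<s_menu><s_name>Latte</s_name></s_menu>" -> {"menu": {"name": "Latte"}}
    #   "<s_item>a<sep/>b</s_item>"               -> {"item": ["a", "b"]}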
@property
def lowerCAmelCase ( self : List[str]):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[Any]):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor
| 73 | 1 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int = 1_6 ,SCREAMING_SNAKE_CASE__ : int = 8_8 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : str = "geglu" ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,):
super().__init__()
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : str = attention_head_dim
__lowerCamelCase : Union[str, Any] = num_attention_heads * attention_head_dim
__lowerCamelCase : Optional[Any] = in_channels
__lowerCamelCase : Tuple = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE__ ,num_channels=SCREAMING_SNAKE_CASE__ ,eps=1E-6 ,affine=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# 3. Define transformers blocks
__lowerCamelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dropout=SCREAMING_SNAKE_CASE__ ,cross_attention_dim=SCREAMING_SNAKE_CASE__ ,activation_fn=SCREAMING_SNAKE_CASE__ ,attention_bias=SCREAMING_SNAKE_CASE__ ,double_self_attention=SCREAMING_SNAKE_CASE__ ,norm_elementwise_affine=SCREAMING_SNAKE_CASE__ ,)
for d in range(SCREAMING_SNAKE_CASE__)
])
__lowerCamelCase : List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = hidden_states.shape
__lowerCamelCase : List[str] = batch_frames // num_frames
__lowerCamelCase : int = hidden_states
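        # Comment added for clarity: the (batch*frames, C, H, W) input is regrouped
        # below into (batch*H*W, frames, C) so attention runs along the temporal axis.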
__lowerCamelCase : Optional[int] = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = hidden_states.permute(0 ,2 ,1 ,3 ,4)
__lowerCamelCase : Any = self.norm(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states.permute(0 ,3 ,4 ,2 ,1).reshape(batch_size * height * width ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.proj_in(SCREAMING_SNAKE_CASE__)
# 2. Blocks
for block in self.transformer_blocks:
__lowerCamelCase : Union[str, Any] = block(
SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,cross_attention_kwargs=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ,)
# 3. Output
__lowerCamelCase : Optional[Any] = self.proj_out(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = (
hidden_states[None, None, :]
.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
.permute(0 ,3 ,4 ,1 ,2)
.contiguous()
)
__lowerCamelCase : List[str] = hidden_states.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE__)
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Dict = len(lowerCamelCase__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase : str = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase__ ):
return None
__lowerCamelCase : Tuple = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__lowerCamelCase : List[Any] = left
__lowerCamelCase : Tuple = point
elif point > right:
__lowerCamelCase : Dict = right
__lowerCamelCase : str = point
else:
if item < current_item:
__lowerCamelCase : Dict = point - 1
else:
__lowerCamelCase : Dict = point + 1
return None
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCamelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , point - 1 )
else:
return interpolation_search_by_recursion(
lowerCamelCase__ , lowerCamelCase__ , point + 1 , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
if collection != sorted(lowerCamelCase__ ):
raise ValueError('Collection must be ascending sorted' )
return True
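# Worked example of the probe formula above (illustrative, matching the demo
# collection below): for [10, 30, 40, 45, 50, 66, 77, 93] and item = 67,
#   point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4
# so the first probe hits index 4 (value 50) and the search continues rightward.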
if __name__ == "__main__":
import sys
a =0
if debug == 1:
a =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
a =67
a =interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 73 | 1 |
from functools import lru_cache
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> set:
__lowerCamelCase : Optional[int] = 2
__lowerCamelCase : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(lowerCamelCase__ )
if n > 1:
factors.add(lowerCamelCase__ )
return factors
@lru_cache
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
return len(unique_prime_factors(lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
return len(set(lowerCamelCase__ ) ) in (0, 1)
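# Illustrative checks for the helpers above (hypothetical, using the call-site names):
#   unique_prime_factors(644) -> {2, 7, 23}   (644 = 2**2 * 7 * 23)
#   upf_len(644)              -> 3
#   equality([3, 3, 3])       -> True; equality([2, 3]) -> False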
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list:
__lowerCamelCase : Tuple = 2
while True:
# Increment each value of a generated range
__lowerCamelCase : int = [base + i for i in range(lowerCamelCase__ )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        __lowerCamelCase : int = [upf_len(x) for x in group]
checker.append(lowerCamelCase__ )
# If all numbers in the list are equal, return the group variable.
if equality(lowerCamelCase__ ):
return group
# Increment our base variable by 1
base += 1
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 4 ) -> int:
__lowerCamelCase : int = run(lowerCamelCase__ )
return results[0] if len(lowerCamelCase__ ) else None
if __name__ == "__main__":
print(solution())
| 73 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[str, Any] = '''mgp-str'''
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str]=[3_2, 1_2_8] ,SCREAMING_SNAKE_CASE__ : Any=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=2_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_8 ,SCREAMING_SNAKE_CASE__ : Tuple=5_0_2_5_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_0_5_2_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 ,SCREAMING_SNAKE_CASE__ : Dict=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : int=1E-5 ,SCREAMING_SNAKE_CASE__ : List[str]=0.0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 ,**SCREAMING_SNAKE_CASE__ : int ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = image_size
__lowerCamelCase : Optional[Any] = patch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Tuple = max_token_length
__lowerCamelCase : Tuple = num_character_labels
__lowerCamelCase : List[str] = num_bpe_labels
__lowerCamelCase : int = num_wordpiece_labels
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : List[str] = mlp_ratio
__lowerCamelCase : List[str] = distilled
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Any = drop_rate
__lowerCamelCase : Tuple = qkv_bias
__lowerCamelCase : Any = attn_drop_rate
__lowerCamelCase : int = drop_path_rate
__lowerCamelCase : str = output_aa_attentions
__lowerCamelCase : int = initializer_range
| 73 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,):
__lowerCamelCase : int = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : List[Any] = is_training
__lowerCamelCase : Tuple = use_attention_mask
__lowerCamelCase : List[str] = use_token_type_ids
__lowerCamelCase : Any = use_labels
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[int] = num_choices
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
__lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : str = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,)
return config, input_ids, attention_mask
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs
__lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Dict = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = FlaxDistilBertModelTester(self)
@slow
def lowerCAmelCase ( self : int):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@require_flax
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__lowerCamelCase : int = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
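# Minimal worked example (hypothetical cash flows): an outlay of -100 followed by
# inflows of 60 and 60 at a 10% discount rate gives
#   -100/1.1**0 + 60/1.1**1 + 60/1.1**2 = -100 + 54.55 + 49.59 ~= 4.13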
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
__lowerCamelCase : Tuple = tweepy.OAuthHandler(lowerCamelCase__ , lowerCamelCase__ )
auth.set_access_token(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = tweepy.API(lowerCamelCase__ )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCamelCase__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowerCamelCase : str = api.user_timeline(
screen_name=lowerCamelCase__ , count=2_0_0 , max_id=lowerCamelCase__ )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Optional[int] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
__lowerCamelCase : List[str] = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,)  # "J'aime le camembert !" (French for "I love camembert!")
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__)['last_hidden_state']
__lowerCamelCase : str = tf.TensorShape((1, 1_0, 7_6_8))
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
# compare the actual values for a slice.
__lowerCamelCase : Any = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4))
| 73 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Any = np.array(lowerCamelCase__ )
__lowerCamelCase : List[Any] = np.array(lowerCamelCase__ )
__lowerCamelCase : Any = en_sentvecs.shape[0]
# mean centering
__lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , lowerCamelCase__ , 'cosine' )
__lowerCamelCase : Optional[Any] = np.array(range(lowerCamelCase__ ) )
__lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0]
__lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
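# Note (added for clarity): precision@10 here is the fraction of English sentence
# vectors whose paired vector ranks among the 10 nearest cosine-distance
# candidates after mean-centering both sides.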
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowerCAmelCase ( self : Optional[Any]):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 73 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : List[Any] = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__lowerCamelCase : Any = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__lowerCamelCase : Union[str, Any] = F"{src_lang}-{tgt_lang}"
__lowerCamelCase : Any = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__lowerCamelCase : str = os.path.join(lowerCamelCase__ , 'README.md' )
print(F"Generating {path}" )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
a =Path(__file__).resolve().parent.parent.parent
a =repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a , a , a =model_name.split("""-""")
a =model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 73 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : list[tuple[float, float]]):
__lowerCamelCase : Union[str, Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__) - 1
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
                comb(self.degree ,i) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(SCREAMING_SNAKE_CASE__) ,5) == 1
return output_values
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : Tuple = self.basis_function(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
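    # Sketch of the math above (comment added for clarity, not in the original):
    # basis_function returns the Bernstein polynomials
    #   B(i, n)(t) = C(n, i) * (1 - t)**(n - i) * t**i,
    # and this method evaluates sum_i B(i, n)(t) * P_i. For degree 1 with points
    # (1, 2) and (3, 5) at t = 0.5, the basis is [0.5, 0.5] and the point is (2.0, 3.5).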
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : float = 0.01):
from matplotlib import pyplot as plt # type: ignore
__lowerCamelCase : list[float] = [] # x coordinates of points to plot
__lowerCamelCase : list[float] = [] # y coordinates of points to plot
__lowerCamelCase : Any = 0.0
while t <= 1:
__lowerCamelCase : List[Any] = self.bezier_curve_function(SCREAMING_SNAKE_CASE__)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
__lowerCamelCase : Optional[Any] = [i[0] for i in self.list_of_points]
__lowerCamelCase : List[str] = [i[1] for i in self.list_of_points]
plt.plot(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='blue' ,label='Curve of Degree ' + str(self.degree) ,)
plt.scatter(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='red' ,label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 73 | 1 |
class A_ :
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : list[int]):
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = [0] * len_array
if len_array > 0:
__lowerCamelCase : Optional[int] = array[0]
for i in range(1 ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Optional[Any] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(SCREAMING_SNAKE_CASE__)
return False
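# Worked example (illustrative): for array [1, 2, 3] the prefix sums are
# [1, 3, 6]; the range sum over indices 1..2 is 6 - 1 = 5, and target_sum = 5 is
# reported because prefix 6 minus target 5 equals the earlier prefix 1.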
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
from __future__ import annotations
import time
a =list[tuple[int, int]]
a =[
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free paths, whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a =[[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : Tuple = pos_x
__lowerCamelCase : List[str] = pos_y
__lowerCamelCase : str = (pos_y, pos_x)
__lowerCamelCase : str = goal_x
__lowerCamelCase : int = goal_y
__lowerCamelCase : List[Any] = parent
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tuple[int, int] ,SCREAMING_SNAKE_CASE__ : tuple[int, int]):
__lowerCamelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = [self.start]
__lowerCamelCase : List[str] = False
def lowerCAmelCase ( self : List[Any]):
while self.node_queue:
__lowerCamelCase : Any = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
__lowerCamelCase : Dict = True
return self.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_successors(SCREAMING_SNAKE_CASE__)
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : Union[str, Any] = []
for action in delta:
__lowerCamelCase : Optional[Any] = parent.pos_x + action[1]
__lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.target.pos_y ,self.target.pos_x ,SCREAMING_SNAKE_CASE__))
return successors
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : List[Any] = node
__lowerCamelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCamelCase : int = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : int = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = False
def lowerCAmelCase ( self : str):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCamelCase : Any = self.fwd_bfs.node_queue.pop(0)
__lowerCamelCase : Any = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase : List[str] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = current_bwd_node
__lowerCamelCase : int = current_fwd_node
__lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Node ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase : List[Any] = fwd_path + bwd_path
return path
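    # Illustrative stitch (added for clarity): if the forward search retraces
    # [(0,0), (1,1)] and the backward search retraces [(3,3), (2,2), (1,1)],
    # the shared meeting node is popped and the result is [(0,0), (1,1), (2,2), (3,3)].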
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a =(0, 0)
a =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a =time.time()
a =BreadthFirstSearch(init, goal)
a =bfs.search()
a =time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
a =time.time()
a =BidirectionalBreadthFirstSearch(init, goal)
a =bd_bfs.search()
a =time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=lowerCamelCase__ , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
        '-n' , '--images_num' , type=lowerCamelCase__ , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=lowerCamelCase__ , default=4_2 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=lowerCamelCase__ , default=0 , help='cuda_id.' , )
__lowerCamelCase : Any = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
if not len(lowerCamelCase__ ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
__lowerCamelCase , __lowerCamelCase : Optional[int] = imgs[0].size
__lowerCamelCase : List[Any] = Image.new('RGB' , size=(cols * w, rows * h) )
__lowerCamelCase , __lowerCamelCase : List[str] = grid.size
for i, img in enumerate(lowerCamelCase__ ):
grid.paste(lowerCamelCase__ , box=(i % cols * w, i // cols * h) )
return grid
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__="robotic cat with wings" , lowerCamelCase__=7.5 , lowerCamelCase__=5_0 , lowerCamelCase__=1 , lowerCamelCase__=4_2 , ) -> int:
__lowerCamelCase : Any = torch.Generator(pipeline.device ).manual_seed(lowerCamelCase__ )
__lowerCamelCase : int = pipeline(
lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , ).images
__lowerCamelCase : Union[str, Any] = int(math.sqrt(lowerCamelCase__ ) )
__lowerCamelCase : str = image_grid(lowerCamelCase__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
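# Illustrative: with num_images_per_prompt=4 the helper above lays images out in a
# 2x2 grid, since _rows = int(math.sqrt(4)) = 2 and cols = 4 // 2 = 2.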
a =parse_args()
# Load models and create wrapper for stable diffusion
a =CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
a =CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
a =AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
a =UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
a =StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a =lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
a =load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, """unet""", unet)
else:
a =unet.to(torch.device("""cuda""", args.cuda_id))
a =pipeline.to(unet.device)
a , a =generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
a =os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 73 |
import qiskit
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> qiskit.result.counts.Counts:
__lowerCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
__lowerCamelCase : List[str] = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowerCamelCase : List[Any] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
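# Illustrative note: the circuit above applies no gates before measuring, so the
# qubit stays in |0> and the returned counts should look like {'0': 1000}.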
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a =datasets.utils.logging.get_logger(__name__)
@dataclass
class A_ ( datasets.BuilderConfig ):
_UpperCAmelCase : int = 10_000
_UpperCAmelCase : Optional[List[str]] = None
_UpperCAmelCase : Optional[datasets.Features] = None
class A_ ( datasets.ArrowBasedBuilder ):
_UpperCAmelCase : int = ParquetConfig
def lowerCAmelCase ( self : List[str]):
return datasets.DatasetInfo(features=self.config.features)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__lowerCamelCase : Dict = dl_manager.download_and_extract(self.config.data_files)
if isinstance(SCREAMING_SNAKE_CASE__ ,(str, list, tuple)):
__lowerCamelCase : List[str] = data_files
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowerCamelCase : Optional[Any] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files})]
__lowerCamelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowerCamelCase : Union[str, Any] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE__) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__ ,'rb') as f:
__lowerCamelCase : int = datasets.Features.from_arrow_schema(pq.read_schema(SCREAMING_SNAKE_CASE__))
break
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE__ ,gen_kwargs={'files': files}))
return splits
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : pa.Table):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowerCamelCase : Optional[int] = table_cast(SCREAMING_SNAKE_CASE__ ,self.info.features.arrow_schema)
return pa_table
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__)):
with open(SCREAMING_SNAKE_CASE__ ,'rb') as f:
__lowerCamelCase : Tuple = pq.ParquetFile(SCREAMING_SNAKE_CASE__)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns)):
__lowerCamelCase : Tuple = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(SCREAMING_SNAKE_CASE__)
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE__)}: {e}")
raise
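    # Illustrative: each yield above is keyed "<file_idx>_<batch_idx>", so two files
    # read in two batches apiece produce keys "0_0", "0_1", "1_0", "1_1".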
| 73 |
import os
import sys
a =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return AutoConfig.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModel.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a ={
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
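# The _LazyModule above defers the heavy torch/TF imports until first attribute
# access. A minimal sketch of the same idea using PEP 562's module-level
# __getattr__ (illustrative, not the _LazyModule implementation):
#
#   import importlib
#
#   _imports = {"modeling_xlm": ["XLMModel", "XLMWithLMHeadModel"]}
#
#   def __getattr__(name):
#       for submodule, names in _imports.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)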
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCamelCase : int = quote(lowerCamelCase__ )
return hfh.hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' , revision=lowerCamelCase__ )
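# Hedged example of the URL this helper (hf_hub_url upstream) produces; the
# repo id and filename are illustrative:
#
#   url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
#   # -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet"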
| 73 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a =yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
a ={
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ={
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
a ="""\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a =(
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
a ="""\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a =(
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
a ="""\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
a ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
a ="""The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
a ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
a ="""\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
a ="""The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
a =""""""
a ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
a ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
a ="""The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
assert ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
with pytest.raises(lowerCamelCase__ , match=re.escape(expected_error.format(path='root' ) ) ):
__lowerCamelCase : List[Any] = ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
with pytest.raises(lowerCamelCase__ , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : str = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
__lowerCamelCase : List[Any] = ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : List[str] = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
__lowerCamelCase : Any = expected_error.format(path=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ , match=re.escape(lowerCamelCase__ ) ):
__lowerCamelCase : Dict = ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : List[str] = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
__lowerCamelCase : Dict = expected_error.format(path=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ , match=re.escape(lowerCamelCase__ ) ):
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Optional[int] = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
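# A compact sketch of the flow these tests exercise; upstream the schema loaded
# at the top of this file is named example_yaml_structure, and validate()
# raises with the collected issues when the card is malformed:
#
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()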
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> float:
__lowerCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__lowerCamelCase : Dict = 1 - (matter_density + radiation_density + dark_energy)
__lowerCamelCase : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowerCamelCase : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
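# The expression above is the Friedmann relation for a universe with radiation,
# matter, curvature and dark energy:
#
#   H(z) = H0 * sqrt( Ω_r (1+z)^4 + Ω_m (1+z)^3 + Ω_k (1+z)^2 + Ω_Λ ),
#   with Ω_k = 1 - (Ω_m + Ω_r + Ω_Λ).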
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a =0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 73 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
if "cls_token" in name:
__lowerCamelCase : Dict = name.replace('cls_token' , 'vit.embeddings.cls_token' )
if "mask_token" in name:
__lowerCamelCase : Any = name.replace('mask_token' , 'decoder.mask_token' )
if "decoder_pos_embed" in name:
__lowerCamelCase : Optional[Any] = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
__lowerCamelCase : Dict = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__lowerCamelCase : Tuple = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase : Optional[int] = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
if "decoder_blocks" in name:
__lowerCamelCase : Optional[Any] = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
__lowerCamelCase : int = name.replace('blocks' , 'vit.encoder.layer' )
if "attn.proj" in name:
__lowerCamelCase : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowerCamelCase : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowerCamelCase : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowerCamelCase : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowerCamelCase : Tuple = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowerCamelCase : Any = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
__lowerCamelCase : List[Any] = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
__lowerCamelCase : Dict = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
__lowerCamelCase : List[Any] = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
__lowerCamelCase : Optional[Any] = name.replace('norm.weight' , 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
__lowerCamelCase : str = name.replace('norm.bias' , 'vit.layernorm.bias' )
return name
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCamelCase : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
__lowerCamelCase : Tuple = key.split('.' )
__lowerCamelCase : Tuple = int(key_split[1] )
if "decoder_blocks" in key:
__lowerCamelCase : int = config.decoder_hidden_size
__lowerCamelCase : Dict = 'decoder.decoder_layers.'
if "weight" in key:
__lowerCamelCase : int = val[:dim, :]
__lowerCamelCase : str = val[dim : dim * 2, :]
__lowerCamelCase : Optional[Any] = val[-dim:, :]
elif "bias" in key:
__lowerCamelCase : Union[str, Any] = val[:dim]
__lowerCamelCase : Optional[int] = val[dim : dim * 2]
__lowerCamelCase : Dict = val[-dim:]
else:
__lowerCamelCase : List[Any] = config.hidden_size
__lowerCamelCase : Tuple = 'vit.encoder.layer.'
if "weight" in key:
__lowerCamelCase : Dict = val[:dim, :]
__lowerCamelCase : Tuple = val[dim : dim * 2, :]
__lowerCamelCase : Union[str, Any] = val[-dim:, :]
elif "bias" in key:
__lowerCamelCase : Optional[Any] = val[:dim]
__lowerCamelCase : Dict = val[dim : dim * 2]
__lowerCamelCase : List[Any] = val[-dim:]
else:
__lowerCamelCase : Optional[Any] = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : List[Any] = ViTMAEConfig()
if "large" in checkpoint_url:
__lowerCamelCase : Tuple = 1_0_2_4
__lowerCamelCase : Optional[Any] = 4_0_9_6
__lowerCamelCase : List[Any] = 2_4
__lowerCamelCase : Tuple = 1_6
elif "huge" in checkpoint_url:
__lowerCamelCase : int = 1_4
__lowerCamelCase : Any = 1_2_8_0
__lowerCamelCase : Dict = 5_1_2_0
__lowerCamelCase : Tuple = 3_2
__lowerCamelCase : Tuple = 1_6
__lowerCamelCase : Tuple = ViTMAEForPreTraining(lowerCamelCase__ )
__lowerCamelCase : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='cpu' )['model']
__lowerCamelCase : str = ViTMAEImageProcessor(size=config.image_size )
__lowerCamelCase : List[Any] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
__lowerCamelCase : Dict = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
__lowerCamelCase : Optional[Any] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
__lowerCamelCase : str = ViTMAEImageProcessor(size=config.image_size )
__lowerCamelCase : Union[str, Any] = image_processor(images=lowerCamelCase__ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
__lowerCamelCase : Tuple = model(**lowerCamelCase__ )
__lowerCamelCase : Optional[int] = outputs.logits
if "large" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
__lowerCamelCase : Optional[int] = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
__lowerCamelCase : List[Any] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
a =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
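# Hedged command-line sketch (the script filename and output folder are
# assumptions; the URL is this script's default):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base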
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
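# Hedged usage sketch; the processor class name and checkpoint are assumptions
# (upstream this pairs a Pix2StructImageProcessor with a T5 tokenizer):
#
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A picture of", return_tensors="pt")
#   # image inputs come back as flattened patches plus an attention mask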
| 73 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
a ="""src/diffusers"""
a ="""."""
# This is to make sure the diffusers module imported is the one in the repo.
a =importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
a =spec.loader.load_module()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
return line.startswith(lowerCamelCase__ ) or len(lowerCamelCase__ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , lowerCamelCase__ ) is not None
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : List[Any] = object_name.split('.' )
__lowerCamelCase : List[Any] = 0
# First let's find the module where our object lives.
__lowerCamelCase : Union[str, Any] = parts[i]
while i < len(lowerCamelCase__ ) and not os.path.isfile(os.path.join(lowerCamelCase__ , F"{module}.py" ) ):
i += 1
if i < len(lowerCamelCase__ ):
__lowerCamelCase : Dict = os.path.join(lowerCamelCase__ , parts[i] )
if i >= len(lowerCamelCase__ ):
raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(lowerCamelCase__ , F"{module}.py" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : List[Any] = f.readlines()
# Now let's find the class / func in the code!
__lowerCamelCase : str = ''
__lowerCamelCase : Optional[int] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase__ ) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCamelCase__ ):
raise ValueError(F" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__lowerCamelCase : Union[str, Any] = line_index
while line_index < len(lowerCamelCase__ ) and _should_continue(lines[line_index] , lowerCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__lowerCamelCase : List[str] = lines[start_index:line_index]
return "".join(lowerCamelCase__ )
a =re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
a =re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
a =re.compile(r"""<FILL\s+[^>]*>""")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Dict = code.split('\n' )
__lowerCamelCase : Tuple = 0
while idx < len(lowerCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCamelCase__ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : Any = len(get_indent(lowerCamelCase__ ) ) > 0
if has_indent:
__lowerCamelCase : Optional[Any] = F"class Bla:\n{code}"
__lowerCamelCase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=lowerCamelCase__ )
__lowerCamelCase : Any = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = style_docstrings_in_code(lowerCamelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> int:
with open(lowerCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Any = []
__lowerCamelCase : Optional[int] = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase__ ):
__lowerCamelCase : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = search.groups()
__lowerCamelCase : Any = find_code_in_diffusers(lowerCamelCase__ )
__lowerCamelCase : List[Any] = get_indent(lowerCamelCase__ )
__lowerCamelCase : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
__lowerCamelCase : Optional[Any] = theoretical_indent
__lowerCamelCase : Tuple = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an `# End copy` comment.
__lowerCamelCase : int = True
while line_index < len(lowerCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase__ ):
break
__lowerCamelCase : Any = lines[line_index]
__lowerCamelCase : List[str] = _should_continue(lowerCamelCase__ , lowerCamelCase__ ) and re.search(F"^{indent}# End copy" , lowerCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__lowerCamelCase : int = lines[start_index:line_index]
__lowerCamelCase : List[Any] = ''.join(lowerCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
__lowerCamelCase : int = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowerCamelCase__ ) is None]
__lowerCamelCase : Union[str, Any] = '\n'.join(lowerCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase__ ) > 0:
__lowerCamelCase : int = replace_pattern.replace('with' , '' ).split(',' )
__lowerCamelCase : Optional[Any] = [_re_replace_pattern.search(lowerCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = pattern.groups()
__lowerCamelCase : List[Any] = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if option.strip() == "all-casing":
__lowerCamelCase : Union[str, Any] = re.sub(obja.lower() , obja.lower() , lowerCamelCase__ )
__lowerCamelCase : Dict = re.sub(obja.upper() , obja.upper() , lowerCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__lowerCamelCase : Dict = blackify(lines[start_index - 1] + theoretical_code )
__lowerCamelCase : Any = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__lowerCamelCase : List[str] = lines[:start_index] + [theoretical_code] + lines[line_index:]
__lowerCamelCase : Optional[int] = start_index + 1
if overwrite and len(lowerCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(F"Detected changes, rewriting {filename}." )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase__ )
return diffs
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = False ) -> Any:
__lowerCamelCase : List[str] = glob.glob(os.path.join(lowerCamelCase__ , '**/*.py' ) , recursive=lowerCamelCase__ )
__lowerCamelCase : Any = []
for filename in all_files:
__lowerCamelCase : str = is_copy_consistent(lowerCamelCase__ , lowerCamelCase__ )
diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(lowerCamelCase__ ) > 0:
__lowerCamelCase : Union[str, Any] = '\n'.join(lowerCamelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a =parser.parse_args()
check_copies(args.fix_and_overwrite)
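# The checker keys off comments of this shape (target path illustrative):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# The optional `with A->B` suffix feeds the regex-based renaming applied before
# the blackified comparison above; `all-casing` also swaps the lower/upper forms.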
| 73 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda lowerCamelCase__ : x[0] / x[1] , reverse=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : List[str] = list(accumulate(lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
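# A de-obfuscated sketch of the same greedy strategy, for readability: sort by
# value/weight ratio, take whole items while they fit, then a fraction of the
# first item that does not.
def fractional_knapsack_example(values, weights, capacity):
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity >= weight:
            total += value
            capacity -= weight
        else:
            total += value * capacity / weight
            break
    return total

# fractional_knapsack_example([60, 100, 120], [10, 20, 30], 50) -> 240.0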
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a ={
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
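# As in the other lazy inits, TYPE_CHECKING gives static analyzers the eager
# imports while runtime resolution stays deferred, e.g. (illustrative):
#
#   import transformers
#   cls = transformers.Blip2ForConditionalGeneration  # real import happens here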
| 73 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
    # Adding zeros to the matrices so that the arrays' dimensions are the same and
    # also a power of 2
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
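# For reference, the seven Strassen products the recursion above assembles
# (standard formulation; the local names in actual_strassen differ):
#
#   M1 = (A11 + A22)(B11 + B22)    M5 = (A11 + A12) B22
#   M2 = (A21 + A22) B11           M6 = (A21 - A11)(B11 + B12)
#   M3 = A11 (B12 - B22)           M7 = (A12 - A22)(B21 + B22)
#   M4 = A22 (B21 - B11)
#
#   C11 = M1 + M4 - M5 + M7    C12 = M3 + M5
#   C21 = M2 + M4              C22 = M1 - M2 + M3 + M6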
| 73 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 73 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ="""▁"""
a ={"""vocab_file""": """sentencepiece.bpe.model"""}
a ={
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
a ={
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
a =["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : str = VOCAB_FILES_NAMES
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase : List[int] = []
_UpperCAmelCase : List[int] = []
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Tuple="</s>" ,SCREAMING_SNAKE_CASE__ : str="</s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Dict="<unk>" ,SCREAMING_SNAKE_CASE__ : Tuple="<pad>" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
        # The mask token behaves like a normal word, i.e. it includes the space before it
__lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
__lowerCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase : Any = kwargs.get('additional_special_tokens' ,[])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=SCREAMING_SNAKE_CASE__ ,tgt_lang=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
__lowerCamelCase : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCamelCase : Tuple = 1
__lowerCamelCase : List[Any] = len(self.sp_model)
__lowerCamelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE__)
}
__lowerCamelCase : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
__lowerCamelCase : Tuple = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
__lowerCamelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowerCamelCase : Optional[Any] = src_lang if src_lang is not None else 'en_XX'
__lowerCamelCase : Tuple = self.lang_code_to_id[self._src_lang]
__lowerCamelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def lowerCAmelCase ( self : Tuple):
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase ( self : int):
return self._src_lang
@src_lang.setter
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Optional[int]):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase : int = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = []
__lowerCamelCase : Optional[int] = ''
__lowerCamelCase : int = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : Tuple = True
__lowerCamelCase : str = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
    def save_vocabulary( self : Any ,save_directory : str ,filename_prefix : Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file ,'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask( self : Optional[Any] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ,already_has_special_tokens : bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens( self : List[str] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self : Tuple ,raw_inputs ,return_tensors : str ,src_lang : Optional[str] ,tgt_lang : Optional[str] ,**extra_kwargs : Dict):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self : Dict ,src_texts : List[str] ,src_lang : str = "en_XX" ,tgt_texts : Optional[List[str]] = None ,tgt_lang : str = "ro_RO" ,**kwargs : Optional[Any] ,):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs)
    def _switch_to_input_mode( self : Dict):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self : Tuple):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self : Tuple ,src_lang : str):
        """Reset special tokens to the source-language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self : Optional[int] ,tgt_lang : str):
        """Reset special tokens to the target-language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
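# Example (added for illustration; `tok` stands for an instance of the tokenizer
# above, loaded from a real SentencePiece model -- a hypothetical session):
#
#     >>> tok.src_lang = "ro_RO"                      # runs set_src_lang_special_tokens
#     >>> tok.prefix_tokens == [tok.lang_code_to_id["ro_RO"]]
#     True
#     >>> tok.suffix_tokens == [tok.eos_token_id]
#     True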
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a ="""▁"""
class AlbertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : str ,vocab_file : str ,do_lower_case : bool = True ,remove_space : bool = True ,keep_accents : bool = False ,bos_token : str = "[CLS]" ,eos_token : str = "[SEP]" ,unk_token : str = "<unk>" ,sep_token : str = "[SEP]" ,pad_token : str = "<pad>" ,cls_token : str = "[CLS]" ,mask_token : str = "[MASK]" ,sp_model_kwargs : Optional[Dict[str, Any]] = None ,**kwargs : Dict ,):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token ,lstrip=True ,rstrip=False ,normalized=False)
            if isinstance(mask_token ,str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size( self : Optional[Any]):
        return len(self.sp_model)
    def get_vocab( self : Optional[Any]):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self : Union[str, Any]):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self : Optional[Any] ,d : Dict):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self : Dict ,inputs : str):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``' ,'"').replace('\'\'' ,'"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' ,outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self : List[Any] ,text : str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text ,out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                # e.g. "9," -> re-tokenize the digits without the SentencePiece underline
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self : Any ,token : str):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self : str ,index : int):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self : Tuple ,tokens : List[str]):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens( self : Dict ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : Optional[int] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ,already_has_special_tokens : bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences( self : Optional[int] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self : Optional[int] ,save_directory : str ,filename_prefix : Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file ,'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
return (out_vocab_file,)
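# Example (added for illustration; `tok` stands for an instance of the ALBERT-style
# tokenizer above -- a hypothetical session):
#
#     >>> ids_a, ids_b = [8, 9], [10]
#     >>> tok.build_inputs_with_special_tokens(ids_a, ids_b) == (
#     ...     [tok.cls_token_id] + ids_a + [tok.sep_token_id] + ids_b + [tok.sep_token_id])
#     True
#     >>> tok.get_special_tokens_mask(ids_a, ids_b)
#     [1, 0, 0, 1, 0, 1]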
| 73 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
| 73 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
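# Worked example (added; values computed by hand from the discounting formula above):
# at a 13% rate, PV = 10 + 20.70/1.13 - 293/1.13**2 + 297/1.13**3 ~= 4.69
if __name__ == "__main__":
    assert present_value(0.13, [10.0, 20.70, -293.0, 297.0]) == 4.69
    assert present_value(0.0, [100.0, 100.0]) == 200.0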
| 73 | 1 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 73 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}
    def __init__( self : Optional[Any] ,backbone_config : Optional[Dict] = None ,feature_size : int = 2_5_6 ,mask_feature_size : int = 2_5_6 ,hidden_dim : int = 2_5_6 ,encoder_feedforward_dim : int = 1_0_2_4 ,activation_function : str = "relu" ,encoder_layers : int = 6 ,decoder_layers : int = 1_0 ,num_attention_heads : int = 8 ,dropout : float = 0.0 ,dim_feedforward : int = 2_0_4_8 ,pre_norm : bool = False ,enforce_input_projection : bool = False ,common_stride : int = 4 ,ignore_value : int = 2_5_5 ,num_queries : int = 1_0_0 ,no_object_weight : float = 0.1 ,class_weight : float = 2.0 ,mask_weight : float = 5.0 ,dice_weight : float = 5.0 ,train_num_points : int = 1_2_5_4_4 ,oversample_ratio : float = 3.0 ,importance_sample_ratio : float = 0.75 ,init_std : float = 0.02 ,init_xavier_std : float = 1.0 ,use_auxiliary_loss : bool = True ,feature_strides : List[int] = [4, 8, 1_6, 3_2] ,output_auxiliary_logits : bool = None ,**kwargs : Optional[Any] ,):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=2_2_4 ,in_channels=3 ,patch_size=4 ,embed_dim=9_6 ,depths=[2, 2, 1_8, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=False ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
        if isinstance(backbone_config ,dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config( cls : str ,backbone_config : PretrainedConfig ,**kwargs : Tuple):
        return cls(
            backbone_config=backbone_config ,**kwargs ,)
    def to_dict( self : str):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
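# Example (added for illustration): instantiating the config with defaults builds
# a Swin backbone config automatically -- a hypothetical session:
#
#     >>> config = Mask2FormerConfig(num_queries=50)
#     >>> config.num_queries
#     50
#     >>> config.to_dict()["backbone_config"]["model_type"]
#     'swin'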
| 73 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
    def __init__( self : List[Any] ,parent ,batch_size : int = 1_3 ,image_size : int = 3_2 ,patch_size : int = 2 ,num_channels : int = 3 ,embed_dim : int = 1_6 ,depths : List[int] = [1, 2, 1] ,num_heads : List[int] = [2, 2, 4] ,window_size : int = 2 ,mlp_ratio : float = 2.0 ,qkv_bias : bool = True ,hidden_dropout_prob : float = 0.0 ,attention_probs_dropout_prob : float = 0.0 ,drop_path_rate : float = 0.1 ,hidden_act : str = "gelu" ,use_absolute_embeddings : bool = False ,patch_norm : bool = True ,initializer_range : float = 0.02 ,layer_norm_eps : float = 1E-5 ,is_training : bool = True ,scope=None ,use_labels : bool = True ,type_sequence_label_size : int = 1_0 ,encoder_stride : int = 8 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self : Optional[Any]):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : int):
        return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    def create_and_check_model( self : List[str] ,config ,pixel_values ,labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling( self : Union[str, Any] ,config ,pixel_values ,labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self : List[Any] ,config ,pixel_values ,labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values ,labels=labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self : Optional[int]):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self : Union[str, Any]):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=SwinvaConfig ,embed_dim=3_7)
    def test_config( self : int):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self : Any):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
    def test_multi_gpu_data_parallel_forward( self : Tuple):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
    def test_inputs_embeds( self : Optional[Any]):
pass
    def test_model_common_attributes( self : Any):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear))
    def test_forward_signature( self : str):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names)
    def test_attention_outputs( self : List[Any]):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions) ,expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions) ,expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            if hasattr(self.model_tester ,'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states ,len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions) ,expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
    def check_hidden_states_output( self : Dict ,inputs_dict ,config ,model_class ,image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) ,expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states) ,expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width).permute(0 ,2 ,1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
    def test_hidden_states_output( self : Union[str, Any]):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size)
    def test_hidden_states_output_with_padding( self : Union[str, Any]):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width))
    def test_for_masked_image_modeling( self : int):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification( self : List[Any]):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self : Optional[int]):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization( self : str):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self : str):
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self : str):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image ,return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape ,expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4))
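# Worked example (added): with the tester defaults above (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]) the shape check in `create_and_check_model` expects
#     expected_seq_len = ((32 // 2) ** 2) // (4 ** (3 - 1)) = 256 // 16 = 16
#     expected_dim     = 16 * 2 ** (3 - 1)                  = 64
# i.e. a last_hidden_state of shape (batch_size, 16, 64).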
| 73 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
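# Example (added): counting letters in a short message with the helper above.
if __name__ == "__main__":
    counts = get_letter_count("Hello")
    assert counts["L"] == 2 and counts["H"] == 1 and counts["Z"] == 0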
| 73 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
def solution(length: int = 5_0) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd(n: int) -> None:
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )
def pretty_print(n: int) -> None:
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 73 | 1 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ['torch', 'torchsde']
    def __init__( self : Union[str, Any] ,*args : int ,**kwargs : Optional[int]):
        requires_backends(self ,['torch', 'torchsde'])
    @classmethod
    def from_config( cls : Dict ,*args : Optional[Any] ,**kwargs : Optional[int]):
        requires_backends(cls ,['torch', 'torchsde'])
    @classmethod
    def from_pretrained( cls : Any ,*args : str ,**kwargs : List[Any]):
        requires_backends(cls ,['torch', 'torchsde'])
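# Usage note (added): thanks to the DummyObject metaclass, importing this
# placeholder succeeds even when the backends are missing; the failure is
# deferred to use time -- a hypothetical session without torch/torchsde:
#
#     >>> DPMSolverSDEScheduler()
#     ImportError: ... requires the torch and torchsde libraries ...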
| 73 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self : Union[str, Any] ,image_processor=None ,tokenizer=None ,**kwargs : Union[str, Any]):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' ,FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor ,tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self : int ,*args : Union[str, Any] ,**kwargs : Union[str, Any]):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args ,**kwargs)
        images = kwargs.pop('images' ,None)
        text = kwargs.pop('text' ,None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images ,*args ,**kwargs)
        if text is not None:
            encodings = self.tokenizer(text ,**kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs
    def batch_decode( self : int ,*args : Union[str, Any] ,**kwargs : Dict):
        return self.tokenizer.batch_decode(*args ,**kwargs)
    def decode( self : Optional[Any] ,*args : List[Any] ,**kwargs : Any):
        return self.tokenizer.decode(*args ,**kwargs)
    @contextmanager
    def as_target_processor( self : Tuple):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self : Any ,tokens : str ,is_inner_value : bool = False ,added_vocab : list = None):
        """Convert a sequence of <s_key>...</s_key> tokens into a (possibly nested) dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'<s_(.*?)>' ,tokens ,re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(RF"</s_{key}>" ,tokens ,re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token ,'')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(F"{start_token_escaped}(.*?){end_token_escaped}" ,tokens ,re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content ,is_inner_value=True ,added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] ,is_inner_value=True ,added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self : List[str]):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,FutureWarning ,)
        return self.image_processor_class
    @property
    def feature_extractor( self : List[Any]):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,FutureWarning ,)
        return self.image_processor
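# Example (added for illustration): `tokenajson` turns Donut-style tag sequences
# into nested dicts. With `processor` an instance of the class above:
#
#     >>> processor.tokenajson("<s_menu><s_nm>Pizza</s_nm></s_menu>")
#     {'menu': {'nm': 'Pizza'}}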
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_time_series_transformer""": [
        """TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TimeSeriesTransformerConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_time_series_transformer"""] = [
        """TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TimeSeriesTransformerForPrediction""",
        """TimeSeriesTransformerModel""",
        """TimeSeriesTransformerPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 73 |
def interpolation_search(sorted_collection: list, item: int):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection: list) -> bool:
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(F"""{target} found at position: {result}""")
else:
print("""Not found""")
| 73 | 1 |
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks ) )
def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
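# Example (added): splitting 4 attention layers evenly across 2 devices.
if __name__ == "__main__":
    device_map = get_device_map(4, [0, 1])
    assert device_map == {0: [0, 1], 1: [2, 3]}
    assert_device_map(device_map, 4)  # raises ValueError if the map is inconsistent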
| 73 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
| 73 | 1 |
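# ConvNeXT-style image processor: shortest-edge resize with an optional crop percentage below 384px, rescaling, and ImageNet-standard normalization.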
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = ['''pixel_values''']
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = size if size is not None else {'shortest_edge': 3_8_4}
__lowerCamelCase : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = do_resize
__lowerCamelCase : Optional[Any] = size
# Default value set here for backwards compatibility where the value in config is None
__lowerCamelCase : Optional[int] = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
__lowerCamelCase : str = resample
__lowerCamelCase : Optional[int] = do_rescale
__lowerCamelCase : int = rescale_factor
__lowerCamelCase : Union[str, Any] = do_normalize
__lowerCamelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
__lowerCamelCase : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
if "shortest_edge" not in size:
raise ValueError(F"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
__lowerCamelCase : List[str] = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__lowerCamelCase : Tuple = int(shortest_edge / crop_pct)
__lowerCamelCase : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE__ ,size=(shortest_edge, shortest_edge) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE__ ,size=(shortest_edge, shortest_edge) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Any ,):
__lowerCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : str = crop_pct if crop_pct is not None else self.crop_pct
__lowerCamelCase : int = resample if resample is not None else self.resample
__lowerCamelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : str = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : Any = image_std if image_std is not None else self.image_std
__lowerCamelCase : List[Any] = size if size is not None else self.size
__lowerCamelCase : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__lowerCamelCase : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
__lowerCamelCase : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,crop_pct=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
__lowerCamelCase : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
__lowerCamelCase : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : List[str] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Tuple = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
| 73 |
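# Flax DistilBERT tests: a tester that builds small configs/inputs for each task head, plus slow checks against the pretrained 'distilbert-base-uncased' checkpoint.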
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,):
__lowerCamelCase : int = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : List[Any] = is_training
__lowerCamelCase : Tuple = use_attention_mask
__lowerCamelCase : List[str] = use_token_type_ids
__lowerCamelCase : Any = use_labels
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[int] = num_choices
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
__lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : str = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,)
return config, input_ids, attention_mask
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs
__lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Dict = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = FlaxDistilBertModelTester(self)
@slow
def lowerCAmelCase ( self : int):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@require_flax
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 | 1 |
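# TensorFlow benchmark arguments: translates deprecated 'no_*' flags into their positive forms and lazily resolves the TPU/GPU/CPU distribution strategy.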
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
a =logging.get_logger(__name__)
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[str, Any] = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCamelCase : Union[str, Any] = deprecated_arg[3:]
                __lowerCamelCase : Tuple = not kwargs.pop(deprecated_arg)
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}")
__lowerCamelCase : Optional[Any] = kwargs.pop('tpu_name' ,self.tpu_name)
__lowerCamelCase : Union[str, Any] = kwargs.pop('device_idx' ,self.device_idx)
__lowerCamelCase : Tuple = kwargs.pop('eager_mode' ,self.eager_mode)
__lowerCamelCase : Tuple = kwargs.pop('use_xla' ,self.use_xla)
super().__init__(**SCREAMING_SNAKE_CASE__)
_UpperCAmelCase : str = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Name of TPU'''} , )
_UpperCAmelCase : int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark models in eager model.'''} )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def lowerCAmelCase ( self : Tuple):
requires_backends(self ,['tf'])
__lowerCamelCase : str = None
if self.tpu:
try:
if self.tpu_name:
__lowerCamelCase : Any = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
else:
__lowerCamelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__lowerCamelCase : Union[str, Any] = None
return tpu
@cached_property
def lowerCAmelCase ( self : List[Any]):
requires_backends(self ,['tf'])
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu)
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
__lowerCamelCase : List[Any] = tf.distribute.TPUStrategy(self._setup_tpu)
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU')
__lowerCamelCase : str = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}")
else:
tf.config.set_visible_devices([] ,'GPU') # disable GPU
__lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}")
return strategy
@property
def lowerCAmelCase ( self : List[Any]):
requires_backends(self ,['tf'])
return self._setup_tpu is not None
@property
def lowerCAmelCase ( self : Optional[Any]):
requires_backends(self ,['tf'])
return self._setup_strategy
@property
def lowerCAmelCase ( self : str):
requires_backends(self ,['tf'])
return tf.config.list_physical_devices('GPU')
@property
def lowerCAmelCase ( self : Union[str, Any]):
requires_backends(self ,['tf'])
if self.cuda:
return len(self.gpu_list)
return 0
@property
def lowerCAmelCase ( self : Optional[int]):
return self.n_gpu > 0
| 73 |
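# Download a user's recent tweets with Tweepy, paging backwards through the timeline via max_id, and write id/created_at/text rows to a CSV file.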
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
    __lowerCamelCase : Tuple = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    __lowerCamelCase : Optional[int] = tweepy.API(auth )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
    alltweets.extend(new_tweets )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        __lowerCamelCase : str = api.user_timeline(
            screen_name=lowerCamelCase__ , count=2_0_0 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73 | 1 |
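# ERNIE-M model configuration: stores the encoder hyperparameters and maps legacy attribute names (dropout, num_classes).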
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a ={
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = '''ernie_m'''
_UpperCAmelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : int = 2_5_0_0_0_2 ,SCREAMING_SNAKE_CASE__ : int = 7_6_8 ,SCREAMING_SNAKE_CASE__ : int = 1_2 ,SCREAMING_SNAKE_CASE__ : int = 1_2 ,SCREAMING_SNAKE_CASE__ : int = 3_0_7_2 ,SCREAMING_SNAKE_CASE__ : str = "gelu" ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : int = 5_1_4 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : float = 1E-05 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,**SCREAMING_SNAKE_CASE__ : str ,):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Union[str, Any] = hidden_dropout_prob
__lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Dict = initializer_range
__lowerCamelCase : Any = layer_norm_eps
__lowerCamelCase : Optional[Any] = classifier_dropout
__lowerCamelCase : List[str] = is_decoder
__lowerCamelCase : List[str] = act_dropout
| 73 |
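# IndicGLUE metric: plain accuracy for the classification tasks, accuracy + F1 for wiki-ner, and precision@10 (cosine retrieval) for cvit-mkb-clsr.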
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Any = np.array(lowerCamelCase__ )
__lowerCamelCase : List[Any] = np.array(lowerCamelCase__ )
__lowerCamelCase : Any = en_sentvecs.shape[0]
# mean centering
__lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , lowerCamelCase__ , 'cosine' )
    __lowerCamelCase : Optional[Any] = np.array(range(n ) )
__lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0]
__lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowerCAmelCase ( self : Optional[Any]):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 73 | 1 |
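# MRA model tests: a tester covering masked LM, sequence/token classification, QA and multiple choice, followed by slow integration checks.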
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : int=8 ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=2 ,SCREAMING_SNAKE_CASE__ : int=3_6 ,SCREAMING_SNAKE_CASE__ : List[str]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.0 ,SCREAMING_SNAKE_CASE__ : int=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=5_1_2 ,SCREAMING_SNAKE_CASE__ : List[Any]=1_6 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : List[str]=None ,):
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : str = batch_size
__lowerCamelCase : Dict = seq_length
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : List[Any] = use_input_mask
__lowerCamelCase : List[Any] = use_token_type_ids
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : int = vocab_size
__lowerCamelCase : int = hidden_size
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Dict = hidden_dropout_prob
__lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Any = type_vocab_size
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : int = initializer_range
__lowerCamelCase : Optional[Any] = num_labels
__lowerCamelCase : Optional[int] = num_choices
__lowerCamelCase : List[Any] = scope
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : int = None
if self.use_input_mask:
__lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : int = None
if self.use_token_type_ids:
__lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
__lowerCamelCase : List[str] = None
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = None
if self.use_labels:
__lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
__lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices)
__lowerCamelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Any):
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Dict = self.get_config()
__lowerCamelCase : Tuple = 3_0_0
return config
def lowerCAmelCase ( self : Optional[int]):
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : str = MraModel(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,):
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = MraModel(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Dict = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,encoder_attention_mask=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : List[str] = MraForMaskedLM(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : List[str] = MraForQuestionAnswering(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Tuple = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : int = self.num_labels
__lowerCamelCase : Optional[Any] = MraForSequenceClassification(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : Optional[Any] = MraForTokenClassification(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Any = self.num_choices
__lowerCamelCase : int = MraForMultipleChoice(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
__lowerCamelCase : int = token_type_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
__lowerCamelCase : str = input_mask.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
__lowerCamelCase : Union[str, Any] = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = config_and_inputs
__lowerCamelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : str = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = False
_UpperCAmelCase : Tuple = ()
def lowerCAmelCase ( self : str):
__lowerCamelCase : Tuple = MraModelTester(self)
__lowerCamelCase : Optional[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7)
def lowerCAmelCase ( self : Optional[Any]):
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase : Optional[int] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any):
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : int):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict = MraModel.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@unittest.skip(reason='MRA does not output attentions')
def lowerCAmelCase ( self : Any):
return
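# Integration tests: run released MRA checkpoints and compare output slices against hard-coded expected tensors.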
@require_torch
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[int] = MraModel.from_pretrained('uw-madison/mra-base-512-4')
__lowerCamelCase : List[str] = torch.arange(2_5_6).unsqueeze(0)
with torch.no_grad():
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Dict = torch.Size((1, 2_5_6, 7_6_8))
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
self.assertTrue(torch.allclose(output[:, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
@slow
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Dict = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
__lowerCamelCase : str = torch.arange(2_5_6).unsqueeze(0)
with torch.no_grad():
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Tuple = 5_0_2_6_5
__lowerCamelCase : Tuple = torch.Size((1, 2_5_6, vocab_size))
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
self.assertTrue(torch.allclose(output[:, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
@slow
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : List[Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
__lowerCamelCase : List[str] = torch.arange(4_0_9_6).unsqueeze(0)
with torch.no_grad():
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : List[Any] = 5_0_2_6_5
__lowerCamelCase : List[str] = torch.Size((1, 4_0_9_6, vocab_size))
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
self.assertTrue(torch.allclose(output[:, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 |
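# Bezier curve of arbitrary degree over 2D control points, evaluated via Bernstein basis polynomials and plotted with matplotlib.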
from __future__ import annotations
from scipy.special import comb # type: ignore
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : list[tuple[float, float]]):
__lowerCamelCase : Union[str, Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__) - 1
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
                comb(self.degree ,i) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values) ,5) == 1
return output_values
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : Tuple = self.basis_function(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : float = 0.01):
from matplotlib import pyplot as plt # type: ignore
__lowerCamelCase : list[float] = [] # x coordinates of points to plot
__lowerCamelCase : list[float] = [] # y coordinates of points to plot
__lowerCamelCase : Any = 0.0
while t <= 1:
            __lowerCamelCase : List[Any] = self.bezier_curve_function(t)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
__lowerCamelCase : Optional[Any] = [i[0] for i in self.list_of_points]
__lowerCamelCase : List[str] = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x ,to_plot_y ,color='blue' ,label='Curve of Degree ' + str(self.degree) ,)
        plt.scatter(x_points ,y_points ,color='red' ,label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 73 | 1 |
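# Pix2Struct configurations: separate text-decoder and vision-encoder sub-configs composed into a single encoder-decoder config.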
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] = '''pix2struct_text_model'''
_UpperCAmelCase : str = ['''past_key_values''']
_UpperCAmelCase : List[Any] = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : List[str]=5_0_2_4_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7_6_8 ,SCREAMING_SNAKE_CASE__ : Tuple=6_4 ,SCREAMING_SNAKE_CASE__ : Tuple=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2_8 ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=1E-6 ,SCREAMING_SNAKE_CASE__ : Dict=1.0 ,SCREAMING_SNAKE_CASE__ : int="gelu_new" ,SCREAMING_SNAKE_CASE__ : int=0 ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : List[str]=0 ,SCREAMING_SNAKE_CASE__ : Tuple=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : Dict=True ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : int = hidden_size
__lowerCamelCase : str = d_kv
__lowerCamelCase : List[Any] = d_ff
__lowerCamelCase : List[Any] = num_layers
__lowerCamelCase : str = num_heads
__lowerCamelCase : Dict = relative_attention_num_buckets
__lowerCamelCase : List[str] = relative_attention_max_distance
__lowerCamelCase : Union[str, Any] = dropout_rate
__lowerCamelCase : Optional[int] = layer_norm_epsilon
__lowerCamelCase : Dict = initializer_factor
__lowerCamelCase : Any = use_cache
__lowerCamelCase : int = eos_token_id
__lowerCamelCase : Dict = decoder_start_token_id
# for backwards compatibility
__lowerCamelCase : Dict = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,tie_word_embeddings=SCREAMING_SNAKE_CASE__ ,is_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
@classmethod
def lowerCAmelCase ( cls : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] ,**SCREAMING_SNAKE_CASE__ : str):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : str = cls.get_config_dict(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
__lowerCamelCase : Dict = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls ,'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = '''pix2struct_vision_model'''
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple=7_6_8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 ,SCREAMING_SNAKE_CASE__ : int=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]="gelu_new" ,SCREAMING_SNAKE_CASE__ : str=1E-6 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Dict=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=1E-10 ,SCREAMING_SNAKE_CASE__ : Any=1.0 ,SCREAMING_SNAKE_CASE__ : List[str]=4_0_9_6 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_2_8 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = hidden_size
__lowerCamelCase : List[str] = patch_embed_hidden_size
__lowerCamelCase : Dict = d_ff
__lowerCamelCase : Optional[Any] = dropout_rate
__lowerCamelCase : str = num_hidden_layers
__lowerCamelCase : Dict = num_attention_heads
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : List[Any] = initializer_factor
__lowerCamelCase : List[str] = attention_dropout
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : Any = dense_act_fn
__lowerCamelCase : int = seq_len
__lowerCamelCase : Tuple = relative_attention_num_buckets
__lowerCamelCase : int = relative_attention_max_distance
__lowerCamelCase : List[Any] = d_kv
@classmethod
def lowerCAmelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
__lowerCamelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = '''pix2struct'''
_UpperCAmelCase : str = True
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : Dict=1.0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : int=True ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text_config is None:
__lowerCamelCase : Any = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
if vision_config is None:
__lowerCamelCase : str = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
__lowerCamelCase : Union[str, Any] = PixaStructTextConfig(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = self.text_config.decoder_start_token_id
__lowerCamelCase : Any = self.text_config.pad_token_id
__lowerCamelCase : Optional[int] = self.text_config.eos_token_id
__lowerCamelCase : Dict = initializer_factor
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : Tuple = self.initializer_range
__lowerCamelCase : Tuple = self.initializer_range
__lowerCamelCase : str = is_vqa
@classmethod
def lowerCAmelCase ( cls : Tuple ,SCREAMING_SNAKE_CASE__ : PixaStructTextConfig ,SCREAMING_SNAKE_CASE__ : PixaStructVisionConfig ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__)
__lowerCamelCase : Union[str, Any] = self.text_config.to_dict()
__lowerCamelCase : Tuple = self.vision_config.to_dict()
__lowerCamelCase : str = self.__class__.model_type
return output
| 73 |
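# Grid pathfinding: plain breadth-first search plus a bidirectional variant that alternates expansions from start and goal and joins the two paths.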
from __future__ import annotations
import time
a =list[tuple[int, int]]
a =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a =[[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : Tuple = pos_x
__lowerCamelCase : List[str] = pos_y
__lowerCamelCase : str = (pos_y, pos_x)
__lowerCamelCase : str = goal_x
__lowerCamelCase : int = goal_y
__lowerCamelCase : List[Any] = parent
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tuple[int, int] ,SCREAMING_SNAKE_CASE__ : tuple[int, int]):
        __lowerCamelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,None)
        __lowerCamelCase : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,None)
__lowerCamelCase : Union[str, Any] = [self.start]
__lowerCamelCase : List[str] = False
def lowerCAmelCase ( self : List[Any]):
while self.node_queue:
__lowerCamelCase : Any = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
__lowerCamelCase : Dict = True
                return self.retrace_path(current_node)
            __lowerCamelCase : Tuple = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : Union[str, Any] = []
for action in delta:
__lowerCamelCase : Optional[Any] = parent.pos_x + action[1]
__lowerCamelCase : Optional[int] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
                Node(pos_x ,pos_y ,self.target.pos_y ,self.target.pos_x ,parent))
return successors
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : List[Any] = node
__lowerCamelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCamelCase : int = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
        __lowerCamelCase : int = BreadthFirstSearch(start ,goal)
        __lowerCamelCase : Optional[Any] = BreadthFirstSearch(goal ,start)
__lowerCamelCase : Optional[Any] = False
def lowerCAmelCase ( self : str):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCamelCase : Any = self.fwd_bfs.node_queue.pop(0)
__lowerCamelCase : Any = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase : List[str] = True
return self.retrace_bidirectional_path(
                    current_fwd_node ,current_bwd_node)
__lowerCamelCase : Optional[Any] = current_bwd_node
__lowerCamelCase : int = current_fwd_node
__lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Node ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a =(0, 0)
a =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a =time.time()
a =BreadthFirstSearch(init, goal)
a =bfs.search()
a =time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
a =time.time()
a =BidirectionalBreadthFirstSearch(init, goal)
a =bd_bfs.search()
a =time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 1 |
import os
from collections.abc import Iterator
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowerCamelCase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowerCamelCase__ , lowerCamelCase__ ).lstrip('./' )
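# Depth 0 produces a fresh "##" markdown heading; deeper levels produce indented "*" bullets.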
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return F"{i * ' '}*" if i else "\n##"
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowerCamelCase__ ) or old_parts[i] != new_part) and new_part:
print(F"{md_prefix(lowerCamelCase__ )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "." ) -> None:
__lowerCamelCase : Optional[int] = ''
for filepath in sorted(good_file_paths(lowerCamelCase__ ) ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] = os.path.split(lowerCamelCase__ )
if filepath != old_path:
__lowerCamelCase : Tuple = print_path(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase : Optional[int] = F"{filepath}/{filename}".replace(' ' , '%20' )
__lowerCamelCase : Tuple = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"{md_prefix(lowerCamelCase__ )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(""".""")
| 73 |
import qiskit
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> qiskit.result.counts.Counts:
__lowerCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
__lowerCamelCase : List[str] = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowerCamelCase : List[Any] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 1 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
    __lowerCamelCase : Optional[Any] = sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda lowerCamelCase__ : lowerCamelCase__[0] / lowerCamelCase__[1] , reverse=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : List[str] = list(accumulate(lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import os
import sys
a =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return AutoConfig.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModel.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
| 73 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
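# Shannon entropy H = -sum(p * log2(p)). The script compares first-order
# (single-character) entropy against second-order (character-pair) entropy.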
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
__lowerCamelCase , __lowerCamelCase : Dict = analyze_text(lowerCamelCase__ )
__lowerCamelCase : List[Any] = list(' ' + ascii_lowercase )
    # total character count, used to turn counts into probabilities.
__lowerCamelCase : int = sum(single_char_strings.values() )
    # first-order entropy over single characters.
__lowerCamelCase : Optional[int] = 0
    # for each character of the alphabet, add its entropy contribution if it occurs.
for ch in my_alphas:
if ch in single_char_strings:
__lowerCamelCase : str = single_char_strings[ch]
__lowerCamelCase : Dict = my_str / all_sum
my_fir_sum += prob * math.loga(lowerCamelCase__ ) # entropy formula.
# print entropy
print(F"{round(-1 * my_fir_sum ):.1f}" )
    # second-order entropy over two-character sequences.
__lowerCamelCase : Tuple = sum(two_char_strings.values() )
__lowerCamelCase : Any = 0
    # accumulate the entropy contribution of each two-character sequence.
    for cha in my_alphas:
        for chb in my_alphas:
            __lowerCamelCase : str = cha + chb
if sequence in two_char_strings:
__lowerCamelCase : str = two_char_strings[sequence]
__lowerCamelCase : Optional[int] = int(lowerCamelCase__ ) / all_sum
my_sec_sum += prob * math.loga(lowerCamelCase__ )
# print second entropy
print(F"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[dict, dict]:
__lowerCamelCase : List[str] = Counter() # type: ignore
__lowerCamelCase : Union[str, Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowerCamelCase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCamelCase : int = quote(lowerCamelCase__ )
return hfh.hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' , revision=lowerCamelCase__ )
| 73 | 1 |
import argparse
import os
import re
a ="""src/transformers"""
# Pattern that looks at the indentation in a line.
a =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
a =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
a =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a =re.compile(r"""\[([^\]]+)\]""")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : List[str] = _re_indent.search(lowerCamelCase__ )
return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__=None , lowerCamelCase__=None ) -> Any:
__lowerCamelCase : int = 0
__lowerCamelCase : int = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase__ ):
index += 1
__lowerCamelCase : Dict = ['\n'.join(lines[:index] )]
else:
__lowerCamelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowerCamelCase : str = [lines[index]]
index += 1
while index < len(lowerCamelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowerCamelCase__ ) )
if index < len(lowerCamelCase__ ) - 1:
__lowerCamelCase : Dict = [lines[index + 1]]
index += 1
else:
__lowerCamelCase : List[str] = []
else:
blocks.append('\n'.join(lowerCamelCase__ ) )
__lowerCamelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase__ ) > 0:
blocks.append('\n'.join(lowerCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
def _inner(lowerCamelCase__ ):
return key(lowerCamelCase__ ).lower().replace('_' , '' )
return _inner
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(lowerCamelCase__ ):
        return lowerCamelCase__
if key is None:
__lowerCamelCase : Dict = noop
# Constants are all uppercase, they go first.
__lowerCamelCase : List[Any] = [obj for obj in objects if key(lowerCamelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowerCamelCase : Union[str, Any] = [obj for obj in objects if key(lowerCamelCase__ )[0].isupper() and not key(lowerCamelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
__lowerCamelCase : Union[str, Any] = [obj for obj in objects if not key(lowerCamelCase__ )[0].isupper()]
__lowerCamelCase : List[Any] = ignore_underscore(lowerCamelCase__ )
return sorted(lowerCamelCase__ , key=lowerCamelCase__ ) + sorted(lowerCamelCase__ , key=lowerCamelCase__ ) + sorted(lowerCamelCase__ , key=lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
# This inner function sort imports between [ ].
def _replace(lowerCamelCase__ ):
__lowerCamelCase : Dict = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowerCamelCase : Any = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowerCamelCase : List[Any] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCamelCase__ )] ) + "]"
__lowerCamelCase : int = import_statement.split('\n' )
if len(lowerCamelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowerCamelCase : Any = 2 if lines[1].strip() == '[' else 1
__lowerCamelCase : Optional[Any] = [(i, _re_strip_line.search(lowerCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __lowerCamelCase : Tuple = sort_objects(lowerCamelCase__ , key=lambda lowerCamelCase__ : lowerCamelCase__[1] )
__lowerCamelCase : Optional[int] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowerCamelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowerCamelCase : int = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowerCamelCase : Optional[int] = keys[:-1]
__lowerCamelCase : List[str] = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(lowerCamelCase__ )] )
return "\n".join(lowerCamelCase__ )
else:
# Finally we have to deal with imports fitting on one line
__lowerCamelCase : Any = _re_bracket_content.sub(_replace , lowerCamelCase__ )
return import_statement
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=True ) -> Dict:
with open(lowerCamelCase__ , encoding='utf-8' ) as f:
__lowerCamelCase : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowerCamelCase : str = split_code_in_indented_blocks(
lowerCamelCase__ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowerCamelCase : Optional[int] = main_blocks[block_idx]
__lowerCamelCase : str = block.split('\n' )
# Get to the start of the imports.
__lowerCamelCase : Union[str, Any] = 0
while line_idx < len(lowerCamelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowerCamelCase : List[Any] = len(lowerCamelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowerCamelCase : Dict = '\n'.join(block_lines[line_idx:-1] )
__lowerCamelCase : Any = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__lowerCamelCase : Any = split_code_in_indented_blocks(lowerCamelCase__ , indent_level=lowerCamelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowerCamelCase : Optional[Any] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowerCamelCase : Optional[Any] = [(pattern.search(lowerCamelCase__ ).groups()[0] if pattern.search(lowerCamelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowerCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(lowerCamelCase__ ) if key is not None]
        __lowerCamelCase : str = [x[0] for x in sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : lowerCamelCase__[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowerCamelCase : int = 0
__lowerCamelCase : Union[str, Any] = []
for i in range(len(lowerCamelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
__lowerCamelCase : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase__ )
count += 1
# And we put our main block back together with its first and last line.
__lowerCamelCase : Tuple = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=True ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = []
for root, _, files in os.walk(lowerCamelCase__ ):
if "__init__.py" in files:
__lowerCamelCase : Optional[Any] = sort_imports(os.path.join(lowerCamelCase__ , '__init__.py' ) , check_only=lowerCamelCase__ )
if result:
__lowerCamelCase : Tuple = [os.path.join(lowerCamelCase__ , '__init__.py' )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(F"Would overwrite {len(lowerCamelCase__ )} files, run `make style`." )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
a =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> float:
__lowerCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__lowerCamelCase : Dict = 1 - (matter_density + radiation_density + dark_energy)
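    # Friedmann equation: E(z)^2 = Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda,
    # and H(z) = H0 * sqrt(E(z)^2).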
__lowerCamelCase : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowerCamelCase : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a =0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 73 | 1 |
a =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a =[None] * 10000000
a =True
a =False
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__lowerCamelCase : Dict = chain(next_number(lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = number_chain
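    # Appending zeros does not change the sum of squared digits, so the same
    # chain endpoint can be cached for number, number*10, number*100, ...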
while number < 1_0_0_0_0_0_0_0:
__lowerCamelCase : List[Any] = number_chain
number *= 1_0
return number_chain
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , lowerCamelCase__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
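    # Wraps a Pix2Struct image processor and a T5 tokenizer into a single
    # processor that prepares image patches and/or text for the model.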
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 73 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
a =logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : List[Any] = git.Repo(search_parent_directories=lowerCamelCase__ )
__lowerCamelCase : str = {
'repo_id': str(lowerCamelCase__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(lowerCamelCase__ , 'git_log.json' ) , 'w' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=4 )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if params.n_gpu <= 0:
__lowerCamelCase : Any = 0
__lowerCamelCase : Union[str, Any] = -1
__lowerCamelCase : str = True
__lowerCamelCase : List[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
__lowerCamelCase : Tuple = int(os.environ['WORLD_SIZE'] )
__lowerCamelCase : int = int(os.environ['N_GPU_NODE'] )
__lowerCamelCase : Dict = int(os.environ['RANK'] )
# number of nodes / node ID
__lowerCamelCase : Dict = params.world_size // params.n_gpu_per_node
__lowerCamelCase : int = params.global_rank // params.n_gpu_per_node
__lowerCamelCase : Union[str, Any] = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
__lowerCamelCase : int = 1
__lowerCamelCase : Dict = 0
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Dict = 0
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__lowerCamelCase : List[str] = params.node_id == 0 and params.local_rank == 0
__lowerCamelCase : Dict = params.n_nodes > 1
# summary
__lowerCamelCase : List[str] = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 73 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
    __lowerCamelCase : Optional[Any] = sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda lowerCamelCase__ : lowerCamelCase__[0] / lowerCamelCase__[1] , reverse=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : List[str] = list(accumulate(lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
a =parser.parse_args()
a =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 73 |
from __future__ import annotations
import math
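# Strassen's algorithm multiplies two n x n matrices using 7 recursive
# multiplications of half-size blocks instead of the naive 8, giving
# O(n^log2(7)) ~= O(n^2.81) arithmetic complexity.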
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 73 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(lowerCamelCase__ ), magnitude * sin(lowerCamelCase__ )]
return [magnitude * cos(radians(lowerCamelCase__ ) ), magnitude * sin(radians(lowerCamelCase__ ) )]
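# A rigid body is in static rotational equilibrium when the net moment of all
# applied forces about a common reference point is approximately zero.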
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1_0**-1 ) -> bool:
__lowerCamelCase : NDArray[floataa] = cross(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : float = sum(lowerCamelCase__ )
return abs(lowerCamelCase__ ) < eps
if __name__ == "__main__":
# Test to check if it works
a =array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a =array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a =array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
a =array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
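    # The mirror is the ellipse y^2 + 4x^2 = 100, so the normal at (x, y) has
    # gradient y / (4x); the outgoing ray is the incoming ray reflected about it.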
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a =logging.get_logger(__name__)
a ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[str] = '''conditional_detr'''
_UpperCAmelCase : int = ['''past_key_values''']
_UpperCAmelCase : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : List[str]=3 ,SCREAMING_SNAKE_CASE__ : int=3_0_0 ,SCREAMING_SNAKE_CASE__ : str=6 ,SCREAMING_SNAKE_CASE__ : Dict=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 ,SCREAMING_SNAKE_CASE__ : int=6 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : List[Any]=8 ,SCREAMING_SNAKE_CASE__ : int=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="relu" ,SCREAMING_SNAKE_CASE__ : List[Any]=2_5_6 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Dict="sine" ,SCREAMING_SNAKE_CASE__ : int="resnet50" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[str]=1 ,SCREAMING_SNAKE_CASE__ : int=1 ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Dict=0.25 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__lowerCamelCase : str = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : int = backbone_config.get('model_type')
__lowerCamelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = use_timm_backbone
__lowerCamelCase : Dict = backbone_config
__lowerCamelCase : int = num_channels
__lowerCamelCase : Union[str, Any] = num_queries
__lowerCamelCase : List[Any] = d_model
__lowerCamelCase : str = encoder_ffn_dim
__lowerCamelCase : Union[str, Any] = encoder_layers
__lowerCamelCase : Union[str, Any] = encoder_attention_heads
__lowerCamelCase : Union[str, Any] = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = decoder_layers
__lowerCamelCase : int = decoder_attention_heads
__lowerCamelCase : Optional[Any] = dropout
__lowerCamelCase : Optional[Any] = attention_dropout
__lowerCamelCase : Any = activation_dropout
__lowerCamelCase : int = activation_function
__lowerCamelCase : Dict = init_std
__lowerCamelCase : int = init_xavier_std
__lowerCamelCase : Any = encoder_layerdrop
__lowerCamelCase : str = decoder_layerdrop
__lowerCamelCase : Dict = encoder_layers
__lowerCamelCase : List[str] = auxiliary_loss
__lowerCamelCase : Optional[int] = position_embedding_type
__lowerCamelCase : List[str] = backbone
__lowerCamelCase : Dict = use_pretrained_backbone
__lowerCamelCase : Union[str, Any] = dilation
# Hungarian matcher
__lowerCamelCase : Dict = class_cost
__lowerCamelCase : Dict = bbox_cost
__lowerCamelCase : Any = giou_cost
# Loss coefficients
__lowerCamelCase : List[str] = mask_loss_coefficient
__lowerCamelCase : Optional[int] = dice_loss_coefficient
__lowerCamelCase : Any = cls_loss_coefficient
__lowerCamelCase : str = bbox_loss_coefficient
__lowerCamelCase : str = giou_loss_coefficient
__lowerCamelCase : Optional[Any] = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : Union[str, Any]):
return self.encoder_attention_heads
@property
def lowerCAmelCase ( self : int):
return self.d_model
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__lowerCamelCase : str = self.backbone_config.to_dict()
__lowerCamelCase : Optional[int] = self.__class__.model_type
return output
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : Optional[int]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def lowerCAmelCase ( self : Optional[Any]):
return 1E-5
@property
def lowerCAmelCase ( self : str):
return 1_2
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a ="""▁"""
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__lowerCamelCase : Dict = (
AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__ ,normalized=SCREAMING_SNAKE_CASE__)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else mask_token
)
__lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = do_lower_case
__lowerCamelCase : Union[str, Any] = remove_space
__lowerCamelCase : Tuple = keep_accents
__lowerCamelCase : Dict = vocab_file
__lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : Optional[Any]):
return len(self.sp_model)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Tuple = None
return state
def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
if self.remove_space:
__lowerCamelCase : Dict = ' '.join(inputs.strip().split())
else:
__lowerCamelCase : Optional[Any] = inputs
__lowerCamelCase : Tuple = outputs.replace('``' ,'"').replace('\'\'' ,'"')
if not self.keep_accents:
__lowerCamelCase : List[str] = unicodedata.normalize('NFKD' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = ''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__)])
if self.do_lower_case:
__lowerCamelCase : Optional[Any] = outputs.lower()
return outputs
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__lowerCamelCase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
__lowerCamelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(SCREAMING_SNAKE_CASE__)
else:
new_pieces.append(SCREAMING_SNAKE_CASE__)
return new_pieces
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Any):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : int = ''
__lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Any = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Tuple = [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
| 73 | 1 |
from math import ceil
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_1 ) -> int:
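    # The four corners of the spiral ring with side 2i+1 are (2i+1)^2, (2i+1)^2 - 2i,
    # (2i+1)^2 - 4i and (2i+1)^2 - 6i; they sum to 4*(2i+1)^2 - 6*(2i).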
__lowerCamelCase : Tuple = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__lowerCamelCase : Any = 2 * i + 1
__lowerCamelCase : Tuple = 2 * i
__lowerCamelCase : Tuple = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a =int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
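    # NPV = sum over i of cash_flow_i / (1 + r)^i, discounting each flow to present value.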
__lowerCamelCase : int = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
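if __name__ == "__main__":
    # Editor's note: a small usage example for the function above. An initial outlay of
    # 1000 followed by three inflows of 500, discounted at 10% per period:
    # -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 = 243.43 (rounded to 2 digits).
    assert net_present_value(0.10, [-1000, 500, 500, 500]) == 243.43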
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = '''gpt_neox'''
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Tuple=5_0_4_3_2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_1_4_4 ,SCREAMING_SNAKE_CASE__ : List[str]=4_4 ,SCREAMING_SNAKE_CASE__ : str=6_4 ,SCREAMING_SNAKE_CASE__ : Any=2_4_5_7_6 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : List[str]=0.25 ,SCREAMING_SNAKE_CASE__ : int=1_0_0_0_0 ,SCREAMING_SNAKE_CASE__ : Dict=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Any=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Any=0.02 ,SCREAMING_SNAKE_CASE__ : List[Any]=1E-5 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Any=0 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Any=None ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : Dict = hidden_size
__lowerCamelCase : Optional[int] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : List[Any] = rotary_pct
__lowerCamelCase : Optional[int] = rotary_emb_base
__lowerCamelCase : int = attention_dropout
__lowerCamelCase : Optional[int] = hidden_dropout
__lowerCamelCase : Dict = classifier_dropout
__lowerCamelCase : Union[str, Any] = initializer_range
__lowerCamelCase : Dict = layer_norm_eps
__lowerCamelCase : str = use_cache
__lowerCamelCase : Optional[int] = tie_word_embeddings
__lowerCamelCase : Optional[int] = use_parallel_residual
__lowerCamelCase : str = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!')
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 73 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = '''mask2former'''
_UpperCAmelCase : Dict = ['''swin''']
_UpperCAmelCase : Optional[int] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Dict] = None ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ,SCREAMING_SNAKE_CASE__ : str = "relu" ,SCREAMING_SNAKE_CASE__ : int = 6 ,SCREAMING_SNAKE_CASE__ : int = 1_0 ,SCREAMING_SNAKE_CASE__ : int = 8 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : int = 4 ,SCREAMING_SNAKE_CASE__ : int = 2_5_5 ,SCREAMING_SNAKE_CASE__ : int = 1_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 2.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 ,SCREAMING_SNAKE_CASE__ : float = 3.0 ,SCREAMING_SNAKE_CASE__ : float = 0.75 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : float = 1.0 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] ,SCREAMING_SNAKE_CASE__ : bool = None ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
__lowerCamelCase : Optional[Any] = CONFIG_MAPPING['swin'](
image_size=2_2_4 ,in_channels=3 ,patch_size=4 ,embed_dim=9_6 ,depths=[2, 2, 1_8, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=SCREAMING_SNAKE_CASE__ ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Union[str, Any] = backbone_config.pop('model_type')
__lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported)}")
__lowerCamelCase : Dict = backbone_config
__lowerCamelCase : int = feature_size
__lowerCamelCase : List[str] = mask_feature_size
__lowerCamelCase : int = hidden_dim
__lowerCamelCase : str = encoder_feedforward_dim
__lowerCamelCase : Optional[int] = activation_function
__lowerCamelCase : int = encoder_layers
__lowerCamelCase : List[Any] = decoder_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Tuple = dropout
__lowerCamelCase : Dict = dim_feedforward
__lowerCamelCase : Union[str, Any] = pre_norm
__lowerCamelCase : List[str] = enforce_input_projection
__lowerCamelCase : Optional[int] = common_stride
__lowerCamelCase : Dict = ignore_value
__lowerCamelCase : Optional[Any] = num_queries
__lowerCamelCase : int = no_object_weight
__lowerCamelCase : Optional[Any] = class_weight
__lowerCamelCase : str = mask_weight
__lowerCamelCase : List[str] = dice_weight
__lowerCamelCase : Dict = train_num_points
__lowerCamelCase : Optional[int] = oversample_ratio
__lowerCamelCase : Optional[Any] = importance_sample_ratio
__lowerCamelCase : List[Any] = init_std
__lowerCamelCase : Tuple = init_xavier_std
__lowerCamelCase : Union[str, Any] = use_auxiliary_loss
__lowerCamelCase : List[Any] = feature_strides
__lowerCamelCase : Any = output_auxiliary_logits
__lowerCamelCase : List[Any] = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__)
@classmethod
def lowerCAmelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,**SCREAMING_SNAKE_CASE__ : Tuple):
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__)
__lowerCamelCase : List[Any] = self.backbone_config.to_dict()
__lowerCamelCase : Union[str, Any] = self.__class__.model_type
return output
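# Editor's note: a minimal standalone sketch of the serialization pattern `to_dict`
# implements above: copy the instance dict, then replace the nested backbone config
# object with its own dict so the result is plain, JSON-ready data. The Tiny* class
# names are hypothetical and exist only for illustration.
class TinyBackboneConfig:
    def __init__(self, depths=(2, 2, 18, 2)):
        self.depths = list(depths)
        self.model_type = 'swin'

    def to_dict(self):
        return dict(self.__dict__)

class TinyMaskConfig:
    def __init__(self, backbone_config, hidden_dim=256):
        self.backbone_config = backbone_config
        self.hidden_dim = hidden_dim

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()  # flatten nested config
        output['model_type'] = 'mask2former'
        return output

demo_dict = TinyMaskConfig(TinyBackboneConfig()).to_dict()
assert isinstance(demo_dict['backbone_config'], dict)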
| 73 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a =get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : int = DebertaVaTokenizer
_UpperCAmelCase : int = DebertaVaTokenizerFast
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Tuple = True
def lowerCAmelCase ( self : Optional[int]):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : List[str] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : List[Any] = 'this is a test'
__lowerCamelCase : Optional[int] = 'this is a test'
return input_text, output_text
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = '<pad>'
__lowerCamelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'<pad>')
self.assertEqual(vocab_keys[1] ,'<unk>')
self.assertEqual(vocab_keys[-1] ,'[PAD]')
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,3_0_0_0_1)
def lowerCAmelCase ( self : List[Any]):
self.assertEqual(self.get_tokenizer().vocab_size ,3_0_0_0_0)
def lowerCAmelCase ( self : Dict):
# fmt: off
__lowerCamelCase : List[Any] = ' \tHeLLo!how \n Are yoU? '
__lowerCamelCase : Tuple = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
__lowerCamelCase : int = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def lowerCAmelCase ( self : Union[str, Any]):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def lowerCAmelCase ( self : int):
pass
def lowerCAmelCase ( self : str):
# fmt: off
__lowerCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__lowerCamelCase : Tuple = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
# fmt: off
__lowerCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : Optional[int] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__lowerCamelCase : Dict = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
# fmt: off
__lowerCamelCase : Dict = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__lowerCamelCase : str = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
# fmt: off
__lowerCamelCase : List[Any] = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : Union[str, Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__lowerCamelCase : int = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str]):
# fmt: off
__lowerCamelCase : List[str] = ' \tHeLLo!how \n Are yoU? '
__lowerCamelCase : Any = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
__lowerCamelCase : int = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int):
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : List[str] = self.get_rust_tokenizer()
__lowerCamelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
__lowerCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__lowerCamelCase : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[int] = 'This is a test'
__lowerCamelCase : int = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__lowerCamelCase : str = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
__lowerCamelCase : Optional[Any] = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
__lowerCamelCase : List[str] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# fmt: off
__lowerCamelCase : Any = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : Union[str, Any] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__lowerCamelCase : Optional[int] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
__lowerCamelCase : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__lowerCamelCase : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = tokenizer.encode('sequence builders')
__lowerCamelCase : Union[str, Any] = tokenizer.encode('multi-sequence build')
__lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,SCREAMING_SNAKE_CASE__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,SCREAMING_SNAKE_CASE__ ,)
@slow
def lowerCAmelCase ( self : Any):
# fmt: off
__lowerCamelCase : int = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name='microsoft/deberta-v2-xlarge' ,revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' ,)
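# Editor's note: the expected token lists above rely on the SentencePiece convention that
# '\u2581' (rendered as the '▁' metasymbol) marks a word boundary. Joining pieces and
# mapping it back to a space recovers the original text, independent of any model:
pieces = ['\u2581hello', '!', 'how', '\u2581are', '\u2581you', '?']
assert ''.join(pieces).replace('\u2581', ' ').strip() == 'hello!how are you?'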
| 73 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count

def get_item_at_index_zero(x):
    return x[0]

def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order_list: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order_list)

def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
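if __name__ == "__main__":
    # Editor's note: a quick demonstration of the scorer above. The maximum score is 12
    # (all 6 most-common and all 6 least-common English letters in the expected bands);
    # ordinary English prose scores high, while text dominated by rare letters does not.
    print(english_freq_match_score('The quick brown fox jumps over the lazy dog runs on'))
    print(english_freq_match_score('ZZZZ QQQQ XXXX'))  # 0: Z, Q, X lead the ordering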
| 73 | 1 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
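if __name__ == "__main__":
    # Editor's note: a worked example of the grouping above. '1111' is left-padded to
    # '001111', split into ['001', '111'], and each 3-bit group becomes one octal digit.
    assert bin_to_octal('1111') == '17'
    assert bin_to_octal('101010101') == '525'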
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
from __future__ import annotations

from scipy.special import comb  # type: ignore

class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
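if __name__ == "__main__":
    # Editor's note: a small numeric check of the evaluation above. For the degree-1
    # curve through (1, 2) and (3, 5), the Bernstein weights at t = 0.5 are [0.5, 0.5],
    # so the curve passes through the segment midpoint (2.0, 3.5).
    demo_curve = BezierCurve([(1, 2), (3, 5)])
    assert demo_curve.basis_function(0.5) == [0.5, 0.5]
    assert demo_curve.bezier_curve_function(0.5) == (2.0, 3.5)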
| 73 |
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()

# Function to print lower half of diamond (pyramid)
def reverse_floyd(n: int) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')

def pretty_print(n: int) -> None:
    if n <= 0:
        print('       ...       ....        nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half

if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 73 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()

def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
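# Editor's note: the key new idea in this script is `find_executable_batch_size`, which
# retries the decorated function with a halved batch size whenever it raises an error
# that looks like a CUDA out-of-memory failure. A toy, training-free sketch (assuming
# only that documented halving behavior; the names below are illustrative):
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def toy_loop(batch_size):
        if batch_size > 16:
            raise RuntimeError('CUDA out of memory.')  # fake OOM -> retry at half size
        return batch_size

    return toy_loop()  # 128 -> 64 -> 32 -> 16, then succeeds and returns 16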
| 73 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : List[Any] = '''AutoImageProcessor'''
_UpperCAmelCase : Dict = '''AutoTokenizer'''
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Union[str, Any] = kwargs.pop('feature_extractor')
__lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.image_processor
__lowerCamelCase : Optional[int] = False
def __call__( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = kwargs.pop('images' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = kwargs.pop('text' ,SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__lowerCamelCase : int = args[0]
__lowerCamelCase : List[str] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
if images is not None:
__lowerCamelCase : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None:
__lowerCamelCase : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : Optional[Any] = encodings['input_ids']
return inputs
def lowerCAmelCase ( self : int ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Any):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@contextmanager
def lowerCAmelCase ( self : Tuple):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.')
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = self.tokenizer
yield
__lowerCamelCase : Tuple = self.image_processor
__lowerCamelCase : Tuple = False
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : List[Any]=None):
if added_vocab is None:
__lowerCamelCase : str = self.tokenizer.get_added_vocab()
__lowerCamelCase : Union[str, Any] = {}
while tokens:
__lowerCamelCase : Tuple = re.search(R'<s_(.*?)>' ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if start_token is None:
break
__lowerCamelCase : Dict = start_token.group(1)
__lowerCamelCase : List[str] = re.search(RF"</s_{key}>" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
__lowerCamelCase : Optional[int] = start_token.group()
if end_token is None:
__lowerCamelCase : List[Any] = tokens.replace(SCREAMING_SNAKE_CASE__ ,'')
else:
__lowerCamelCase : Tuple = end_token.group()
__lowerCamelCase : int = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = re.escape(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = re.search(F"{start_token_escaped}(.*?){end_token_escaped}" ,SCREAMING_SNAKE_CASE__ ,re.IGNORECASE)
if content is not None:
__lowerCamelCase : List[Any] = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCamelCase : str = self.tokenajson(SCREAMING_SNAKE_CASE__ ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if value:
if len(SCREAMING_SNAKE_CASE__) == 1:
__lowerCamelCase : Tuple = value[0]
__lowerCamelCase : int = value
else: # leaf nodes
__lowerCamelCase : Tuple = []
for leaf in content.split(R'<sep/>'):
__lowerCamelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCamelCase : str = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE__)
if len(output[key]) == 1:
__lowerCamelCase : Dict = output[key][0]
__lowerCamelCase : Dict = tokens[tokens.find(SCREAMING_SNAKE_CASE__) + len(SCREAMING_SNAKE_CASE__) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=SCREAMING_SNAKE_CASE__ ,added_vocab=SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase ( self : List[str]):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[Any]):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,SCREAMING_SNAKE_CASE__ ,)
return self.image_processor
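# Editor's note: a standalone sketch of the tag-to-JSON walk the method above implements
# (named `tokenajson` in this obfuscated dump). Donut-style output wraps each field in
# <s_key> ... </s_key> markers; a single non-nested level is parsed here for
# illustration, and the helper name is hypothetical.
def demo_tokenajson(tokens):
    output = {}
    for match in re.finditer(r'<s_(.*?)>(.*?)</s_\1>', tokens):
        output[match.group(1)] = match.group(2).strip()
    return output

assert demo_tokenajson('<s_name> Jane </s_name><s_total> 9.99 </s_total>') == {
    'name': 'Jane',
    'total': '9.99',
}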
| 73 | 1 |
def aliquot_sum(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
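if __name__ == "__main__":
    # Editor's note: the aliquot sum of 6 is 1 + 2 + 3 = 6 (so 6 is a perfect number),
    # while the aliquot sum of any prime such as 7 is just 1.
    assert aliquot_sum(6) == 6
    assert aliquot_sum(7) == 1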
| 73 |
def interpolation_search(sorted_collection: list, item: int):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None

def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)

def __assert_sorted(collection: list) -> bool:
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 73 | 1 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 73 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
| 73 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class A_ ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=False ,SCREAMING_SNAKE_CASE__ : Tuple="<s>" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" ,SCREAMING_SNAKE_CASE__ : Any="<unk>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<sep>" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" ,SCREAMING_SNAKE_CASE__ : List[Any]="<cls>" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<mask>" ,SCREAMING_SNAKE_CASE__ : Dict=["<eop>", "<eod>"] ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
__lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
__lowerCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,additional_special_tokens=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Optional[int] = 3
__lowerCamelCase : str = do_lower_case
__lowerCamelCase : Optional[int] = remove_space
__lowerCamelCase : List[Any] = keep_accents
__lowerCamelCase : Any = vocab_file
__lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.')
__lowerCamelCase : Optional[int] = jieba
__lowerCamelCase : str = str.maketrans(' \n' ,'\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowerCAmelCase ( self : Dict):
return len(self.sp_model)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[Any]):
__lowerCamelCase : List[str] = self.__dict__.copy()
__lowerCamelCase : Optional[int] = None
return state
def __setstate__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : List[Any] = {}
__lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int):
if self.remove_space:
__lowerCamelCase : List[Any] = ' '.join(inputs.strip().split())
else:
__lowerCamelCase : Dict = inputs
__lowerCamelCase : List[str] = outputs.replace('``' ,'"').replace('\'\'' ,'"')
if not self.keep_accents:
__lowerCamelCase : List[str] = unicodedata.normalize('NFKD' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = ''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__)])
if self.do_lower_case:
__lowerCamelCase : str = outputs.lower()
return outputs
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Any = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__lowerCamelCase : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__lowerCamelCase : Dict = cur_pieces[1:]
else:
__lowerCamelCase : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(SCREAMING_SNAKE_CASE__)
else:
new_pieces.append(SCREAMING_SNAKE_CASE__)
return new_pieces
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip()
return out_string
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : List[str] = [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is not None:
return ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1]
return ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1]
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Any = [self.sep_token_id]
__lowerCamelCase : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
def lowerCAmelCase ( self : List[Any] ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : List[Any] = super()._decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = text.replace(' ' ,'').replace('\u2582' ,' ').replace('\u2583' ,'\n')
return text
| 73 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,):
__lowerCamelCase : int = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : List[Any] = is_training
__lowerCamelCase : Tuple = use_attention_mask
__lowerCamelCase : List[str] = use_token_type_ids
__lowerCamelCase : Any = use_labels
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Optional[int] = num_choices
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
__lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : str = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,)
return config, input_ids, attention_mask
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs
__lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Dict = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = FlaxDistilBertModelTester(self)
@slow
def lowerCAmelCase ( self : int):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased')
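            # a single dummy token id is enough to check that the checkpoint loads and a forward pass runs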
__lowerCamelCase : List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@require_flax
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
__lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''pixel_values''']
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PIL.Image.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
__lowerCamelCase : int = get_size_dict(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Optional[int] = do_resize
__lowerCamelCase : List[Any] = size
__lowerCamelCase : Any = resample
__lowerCamelCase : Optional[int] = do_center_crop
__lowerCamelCase : Any = crop_size
__lowerCamelCase : Optional[int] = do_rescale
__lowerCamelCase : int = rescale_factor
__lowerCamelCase : str = do_normalize
__lowerCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PIL.Image.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Any ,):
__lowerCamelCase : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return resize(
SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
__lowerCamelCase : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Dict ,):
__lowerCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : int = resample if resample is not None else self.resample
__lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : int = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Any = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : str = image_std if image_std is not None else self.image_std
__lowerCamelCase : Union[str, Any] = size if size is not None else self.size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__lowerCamelCase : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
__lowerCamelCase : Any = [self.resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__) for image in images]
if do_center_crop:
__lowerCamelCase : Dict = [self.center_crop(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
__lowerCamelCase : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
__lowerCamelCase : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[int] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
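# For reference, a minimal self-contained sketch of the default pipeline above
# (resize to 256 -> center-crop to 224 -> rescale by 1/255 -> normalize with
# mean/std of 0.5 -> channels-first). The helper name is illustrative, and an
# RGB PIL image is assumed.
def _sketch_preprocess(img):
    import numpy as np
    from PIL import Image
    img = img.resize((2_5_6, 2_5_6), Image.BICUBIC)  # do_resize
    left = (2_5_6 - 2_2_4) // 2
    img = img.crop((left, left, left + 2_2_4, left + 2_2_4))  # do_center_crop
    arr = np.asarray(img).astype(np.float32) / 2_5_5  # do_rescale (factor 1/255)
    arr = (arr - 0.5) / 0.5  # do_normalize with IMAGENET_STANDARD_MEAN/STD
    return arr.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST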
| 73 |
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
__lowerCamelCase : Tuple = tweepy.OAuthHandler(lowerCamelCase__ , lowerCamelCase__ )
auth.set_access_token(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = tweepy.API(lowerCamelCase__ )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
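    # Note: the standard user_timeline endpoint only returns roughly the most recent
    # 3200 tweets of an account, so this loop terminates even for prolific users.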
while len(lowerCamelCase__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowerCamelCase : str = api.user_timeline(
screen_name=lowerCamelCase__ , count=2_0_0 , max_id=lowerCamelCase__ )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
    with open(F"new_{screen_name}_tweets.csv" , 'w' , newline='' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : str=1_8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3_0 ,SCREAMING_SNAKE_CASE__ : List[str]=4_0_0 ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[Any]=[0.5, 0.5, 0.5] ,):
__lowerCamelCase : Any = size if size is not None else {'shortest_edge': 1_8}
__lowerCamelCase : int = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : Optional[Any] = batch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : List[Any] = image_size
__lowerCamelCase : List[Any] = min_resolution
__lowerCamelCase : Any = max_resolution
__lowerCamelCase : Optional[int] = do_resize
__lowerCamelCase : Optional[int] = size
__lowerCamelCase : List[Any] = do_center_crop
__lowerCamelCase : Tuple = crop_size
__lowerCamelCase : List[str] = do_normalize
__lowerCamelCase : Optional[Any] = image_mean
__lowerCamelCase : int = image_std
def lowerCAmelCase ( self : List[str]):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : List[Any] = LevitImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[int] = LevitImageProcessingTester(self)
@property
def lowerCAmelCase ( self : int):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_center_crop'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size ,{'shortest_edge': 1_8})
self.assertEqual(image_processor.crop_size ,{'height': 1_8, 'width': 1_8})
__lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4)
self.assertEqual(image_processor.size ,{'shortest_edge': 4_2})
self.assertEqual(image_processor.crop_size ,{'height': 8_4, 'width': 8_4})
def lowerCAmelCase ( self : Any):
pass
def lowerCAmelCase ( self : List[str]):
# Initialize image_processing
__lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
# Test not batched input
__lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
__lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCAmelCase ( self : Dict):
# Initialize image_processing
__lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
# Test not batched input
__lowerCamelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
__lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCAmelCase ( self : str):
# Initialize image_processing
__lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
# Test not batched input
__lowerCamelCase : List[str] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
__lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 73 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    \"accuracy\": Accuracy
    \"f1\": F1 score
    \"precision@10\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
    __lowerCamelCase : Tuple = float(f1_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Any = np.array(lowerCamelCase__ )
__lowerCamelCase : List[Any] = np.array(lowerCamelCase__ )
__lowerCamelCase : Any = en_sentvecs.shape[0]
# mean centering
__lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
__lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , lowerCamelCase__ , 'cosine' )
__lowerCamelCase : Optional[Any] = np.array(range(lowerCamelCase__ ) )
__lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0]
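    # preds[i] holds the indices of the 10 nearest in-language sentence vectors (by
    # cosine distance) to the i-th English sentence; a "match" means the aligned
    # sentence (index i) is among those 10, which yields precision@10.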
__lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowerCAmelCase ( self : Optional[Any]):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 73 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a ={"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
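    # _LazyModule defers the heavy torch-backed imports: attributes listed in
    # _import_structure are only actually imported the first time they are accessed.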
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 73 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : list[tuple[float, float]]):
__lowerCamelCase : Union[str, Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__) - 1
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
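            # b_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i, the i-th Bernstein polynomial of degree n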
output_values.append(
comb(self.degree ,SCREAMING_SNAKE_CASE__) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(SCREAMING_SNAKE_CASE__) ,5) == 1
return output_values
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCamelCase : Tuple = self.basis_function(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : float = 0.01):
from matplotlib import pyplot as plt # type: ignore
__lowerCamelCase : list[float] = [] # x coordinates of points to plot
__lowerCamelCase : list[float] = [] # y coordinates of points to plot
__lowerCamelCase : Any = 0.0
while t <= 1:
__lowerCamelCase : List[Any] = self.bezier_curve_function(SCREAMING_SNAKE_CASE__)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
__lowerCamelCase : Optional[Any] = [i[0] for i in self.list_of_points]
__lowerCamelCase : List[str] = [i[1] for i in self.list_of_points]
plt.plot(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='blue' ,label='Curve of Degree ' + str(self.degree) ,)
plt.scatter(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,color='red' ,label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 73 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a =logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a ={
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
a ={
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
a ={
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = BertTokenizer
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[UNK]" ,SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" ,SCREAMING_SNAKE_CASE__ : str="[PAD]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[CLS]" ,SCREAMING_SNAKE_CASE__ : str="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
super().__init__(
SCREAMING_SNAKE_CASE__ ,tokenizer_file=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ ,strip_accents=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' ,SCREAMING_SNAKE_CASE__) != do_lower_case
or normalizer_state.get('strip_accents' ,SCREAMING_SNAKE_CASE__) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,SCREAMING_SNAKE_CASE__) != tokenize_chinese_chars
):
__lowerCamelCase : Any = getattr(SCREAMING_SNAKE_CASE__ ,normalizer_state.pop('type'))
__lowerCamelCase : str = do_lower_case
__lowerCamelCase : Any = strip_accents
__lowerCamelCase : Any = tokenize_chinese_chars
__lowerCamelCase : int = normalizer_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = do_lower_case
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple=None):
__lowerCamelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : str = [self.sep_token_id]
__lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
__lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ ,name=SCREAMING_SNAKE_CASE__)
return tuple(SCREAMING_SNAKE_CASE__)
| 73 |
from __future__ import annotations
import time
a =list[tuple[int, int]]
a =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a =[[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : Tuple = pos_x
__lowerCamelCase : List[str] = pos_y
__lowerCamelCase : str = (pos_y, pos_x)
__lowerCamelCase : str = goal_x
__lowerCamelCase : int = goal_y
__lowerCamelCase : List[Any] = parent
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tuple[int, int] ,SCREAMING_SNAKE_CASE__ : tuple[int, int]):
__lowerCamelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = [self.start]
__lowerCamelCase : List[str] = False
def lowerCAmelCase ( self : List[Any]):
while self.node_queue:
__lowerCamelCase : Any = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
__lowerCamelCase : Dict = True
return self.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_successors(SCREAMING_SNAKE_CASE__)
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : Union[str, Any] = []
for action in delta:
__lowerCamelCase : Optional[Any] = parent.pos_x + action[1]
__lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.target.pos_y ,self.target.pos_x ,SCREAMING_SNAKE_CASE__))
return successors
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : List[Any] = node
__lowerCamelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCamelCase : int = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : int = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = False
def lowerCAmelCase ( self : str):
        while self.fwd_bfs.node_queue and self.bwd_bfs.node_queue:
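            # `and` rather than `or`: if either frontier empties, the start and goal are
            # disconnected, and popping from the empty queue below would raise IndexError.
            # Advancing both frontiers one node at a time explores on the order of
            # 2 * b**(d/2) nodes instead of b**d for a branching factor b and depth d.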
__lowerCamelCase : Any = self.fwd_bfs.node_queue.pop(0)
__lowerCamelCase : Any = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase : List[str] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = current_bwd_node
__lowerCamelCase : int = current_fwd_node
__lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Node ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a =(0, 0)
a =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a =time.time()
a =BreadthFirstSearch(init, goal)
a =bfs.search()
a =time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
a =time.time()
a =BidirectionalBreadthFirstSearch(init, goal)
a =bd_bfs.search()
a =time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
a =logging.getLogger(__name__)
@dataclass
class A_ :
_UpperCAmelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase ( self : str):
if self.train_file is not None:
__lowerCamelCase : str = self.train_file.split('.')[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__lowerCamelCase : List[str] = self.validation_file.split('.')[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
_UpperCAmelCase : PreTrainedTokenizerBase
_UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = None
def __call__( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Union[str, Any] = 'label' if 'label' in features[0].keys() else 'labels'
__lowerCamelCase : Any = [feature.pop(SCREAMING_SNAKE_CASE__) for feature in features]
__lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = len(features[0]['input_ids'])
__lowerCamelCase : Optional[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(SCREAMING_SNAKE_CASE__)] for feature in features
]
__lowerCamelCase : List[Any] = list(chain(*SCREAMING_SNAKE_CASE__))
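        # Each example contributes num_choices candidate sequences; flatten them to a
        # single (batch_size * num_choices) batch so the tokenizer pads them as one list.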
__lowerCamelCase : List[str] = self.tokenizer.pad(
SCREAMING_SNAKE_CASE__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
__lowerCamelCase : Optional[int] = {k: v.view(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,-1) for k, v in batch.items()}
# Add back labels
        __lowerCamelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ ,dtype=torch.int64)
return batch
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCamelCase : int = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F", distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowerCamelCase : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__lowerCamelCase : Union[str, Any] = {}
if data_args.train_file is not None:
__lowerCamelCase : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
__lowerCamelCase : List[str] = data_args.validation_file
__lowerCamelCase : Optional[Any] = data_args.train_file.split('.' )[-1]
__lowerCamelCase : Optional[Any] = load_dataset(
lowerCamelCase__ , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__lowerCamelCase : Optional[Any] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCamelCase : Optional[Any] = [F"ending{i}" for i in range(4 )]
__lowerCamelCase : int = 'sent1'
__lowerCamelCase : Optional[Any] = 'sent2'
if data_args.max_seq_length is None:
__lowerCamelCase : List[Any] = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
            'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length` value'
            ' of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length` you can'
            ' override this default with `--max_seq_length xxx`.' )
__lowerCamelCase : Union[str, Any] = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__lowerCamelCase : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase__ ):
__lowerCamelCase : Any = [[context] * 4 for context in examples[context_name]]
__lowerCamelCase : Dict = examples[question_header_name]
__lowerCamelCase : List[str] = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(lowerCamelCase__ )
]
# Flatten out
__lowerCamelCase : Any = list(chain(*lowerCamelCase__ ) )
__lowerCamelCase : List[str] = list(chain(*lowerCamelCase__ ) )
# Tokenize
__lowerCamelCase : List[Any] = tokenizer(
lowerCamelCase__ , lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
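    # preprocess_function expands each example into four (context, ending) pairs,
    # tokenizes them as one flat batch, then regroups the encodings into lists of 4
    # so every multiple-choice instance carries num_choices=4 sequences.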
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__lowerCamelCase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
__lowerCamelCase : Optional[Any] = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
__lowerCamelCase : Any = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__lowerCamelCase : Union[str, Any] = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__lowerCamelCase : Tuple = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__lowerCamelCase : Optional[int] = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
__lowerCamelCase : Optional[int] = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__lowerCamelCase : Dict = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__lowerCamelCase : Dict = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
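    # With --pad_to_max_length every sequence is already padded, so the default
    # collator suffices; otherwise DataCollatorForMultipleChoice pads dynamically,
    # to a multiple of 8 when 16-bit training is enabled so tensor cores stay efficient.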
# Metric
def compute_metrics(lowerCamelCase__ ):
__lowerCamelCase , __lowerCamelCase : List[str] = eval_predictions
__lowerCamelCase : Union[str, Any] = np.argmax(lowerCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__lowerCamelCase : List[str] = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , )
# Training
if training_args.do_train:
__lowerCamelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
__lowerCamelCase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCamelCase : int = last_checkpoint
__lowerCamelCase : Optional[int] = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCamelCase : int = train_result.metrics
__lowerCamelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
__lowerCamelCase : Tuple = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics('train' , lowerCamelCase__ )
trainer.save_metrics('train' , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCamelCase : Tuple = trainer.evaluate()
__lowerCamelCase : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
__lowerCamelCase : Optional[int] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics('eval' , lowerCamelCase__ )
trainer.save_metrics('eval' , lowerCamelCase__ )
__lowerCamelCase : Any = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73 |
import qiskit
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> qiskit.result.counts.Counts:
__lowerCamelCase : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
__lowerCamelCase : List[str] = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowerCamelCase : List[Any] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
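# With no gates applied, the qubit stays in |0> and every shot measures 0, so for
# shots=1_0_0_0 the returned counts are expected to be {'0': 1000}.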
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__=True , lowerCamelCase__=2 ) -> List[Any]:
from .. import __version__
__lowerCamelCase : List[Any] = take_from
__lowerCamelCase : int = ()
if not isinstance(args[0] , lowerCamelCase__ ):
__lowerCamelCase : Tuple = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse(lowerCamelCase__ ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
__lowerCamelCase : Optional[int] = None
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCamelCase__ ),)
__lowerCamelCase : int = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
values += (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
__lowerCamelCase : List[Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
__lowerCamelCase : str = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
__lowerCamelCase : Optional[int] = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , lowerCamelCase__ , stacklevel=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
__lowerCamelCase : List[str] = inspect.getouterframes(inspect.currentframe() )[1]
__lowerCamelCase : Optional[int] = call_frame.filename
__lowerCamelCase : List[str] = call_frame.lineno
__lowerCamelCase : Union[str, Any] = call_frame.function
__lowerCamelCase , __lowerCamelCase : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(lowerCamelCase__ ) == 0:
return
elif len(lowerCamelCase__ ) == 1:
return values[0]
return values
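# Sketch of intended usage, assuming this mirrors diffusers' `deprecate` helper
# (the kwarg name "scale" below is hypothetical):
#   kwargs = {"scale": 2.0}
#   value = SCREAMING_SNAKE_CASE__("scale", "999.0.0", "Use `scaling_factor`.", take_from=kwargs)
# This pops the deprecated kwarg, emits a deprecation warning, and returns 2.0.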
| 73 |
import os
import sys
a =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
return AutoConfig.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModel.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ )
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list[int]:
if length <= 0 or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(lowerCamelCase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
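    # The hexagonal numbers n * (2n - 1) for n = 0..4 are [0, 1, 6, 15, 28];
    # length=10 extends the list with 45, 66, 91, 120 and 153.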
| 73 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCamelCase : int = quote(lowerCamelCase__ )
return hfh.hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' , revision=lowerCamelCase__ )
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Tuple = '''trocr'''
_UpperCAmelCase : int = ['''past_key_values''']
_UpperCAmelCase : Any = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=5_0_2_6_5 ,SCREAMING_SNAKE_CASE__ : int=1_0_2_4 ,SCREAMING_SNAKE_CASE__ : List[str]=1_2 ,SCREAMING_SNAKE_CASE__ : str=1_6 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_0_9_6 ,SCREAMING_SNAKE_CASE__ : str="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : str=0.0 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Any=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=1 ,SCREAMING_SNAKE_CASE__ : str=0 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,**SCREAMING_SNAKE_CASE__ : int ,):
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : Dict = d_model
__lowerCamelCase : Union[str, Any] = decoder_layers
__lowerCamelCase : Optional[int] = decoder_attention_heads
__lowerCamelCase : str = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = activation_function
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : Dict = dropout
__lowerCamelCase : Any = attention_dropout
__lowerCamelCase : List[str] = activation_dropout
__lowerCamelCase : Optional[Any] = init_std
__lowerCamelCase : Tuple = decoder_layerdrop
__lowerCamelCase : Dict = use_cache
__lowerCamelCase : Dict = scale_embedding
__lowerCamelCase : List[str] = use_learned_position_embeddings
__lowerCamelCase : int = layernorm_embedding
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> float:
__lowerCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__lowerCamelCase : Dict = 1 - (matter_density + radiation_density + dark_energy)
__lowerCamelCase : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowerCamelCase : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
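# The function evaluates the Friedmann equation
#   H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)
# with Omega_k = 1 - (Omega_r + Omega_m + Omega_Lambda). In the flat-universe demo
# below the bracket equals 1 at z = 0, so the expected output is H = H0 = 68.3.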
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a =0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 73 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
__lowerCamelCase : Tuple = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__lowerCamelCase : Tuple = 1
if upper_limit > 0:
__lowerCamelCase : Optional[Any] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)) for j = 0 .. i - 1
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCamelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
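# With C(0) = 1 the recurrence yields 1, 1, 2, 5, 14, 42, 132, ... so an upper
# limit of 5, for example, returns [1, 1, 2, 5, 14, 42].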
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
a =int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
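    # Note on __call__ above: when images are present and the processor is not in VQA
    # mode, the text encoding is merged into the image-processor output (upstream
    # renames the popped keys to decoder_input_ids / decoder_attention_mask), since
    # the text feeds the decoder side of Pix2Struct.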
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 73 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 73 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda lowerCamelCase__ : x[0] / x[1] , reverse=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : List[str] = list(accumulate(lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
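# Worked example: values [60, 100, 120], weights [10, 20, 30], capacity w=50, n=3.
# Sorted by value/weight the ratios are 6, 5, 4; the first two items fit whole
# (value 160) and 20 of the last item's 30 units add 80, so the result is 240.0.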
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : Tuple[str] = ("DownEncoderBlock2D",) ,SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpDecoderBlock2D",) ,SCREAMING_SNAKE_CASE__ : Tuple[int] = (6_4,) ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : str = "silu" ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : float = 0.18215 ,SCREAMING_SNAKE_CASE__ : str = "group" ,):
super().__init__()
# pass init params to Encoder
__lowerCamelCase : List[Any] = Encoder(
in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,down_block_types=SCREAMING_SNAKE_CASE__ ,block_out_channels=SCREAMING_SNAKE_CASE__ ,layers_per_block=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__ ,norm_num_groups=SCREAMING_SNAKE_CASE__ ,double_z=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowerCamelCase : Any = nn.Convad(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,1)
__lowerCamelCase : Optional[Any] = VectorQuantizer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,beta=0.25 ,remap=SCREAMING_SNAKE_CASE__ ,sane_index_shape=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,1)
# pass init params to Decoder
__lowerCamelCase : Dict = Decoder(
in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,up_block_types=SCREAMING_SNAKE_CASE__ ,block_out_channels=SCREAMING_SNAKE_CASE__ ,layers_per_block=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__ ,norm_num_groups=SCREAMING_SNAKE_CASE__ ,norm_type=SCREAMING_SNAKE_CASE__ ,)
@apply_forward_hook
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : bool = True):
__lowerCamelCase : int = self.encoder(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.quant_conv(SCREAMING_SNAKE_CASE__)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE__)
@apply_forward_hook
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True):
# also go through quantization layer
if not force_not_quantize:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = self.quantize(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : Optional[Any] = h
__lowerCamelCase : Optional[Any] = self.post_quant_conv(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.decoder(SCREAMING_SNAKE_CASE__ ,quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : bool = True):
__lowerCamelCase : Union[str, Any] = sample
__lowerCamelCase : Dict = self.encode(SCREAMING_SNAKE_CASE__).latents
__lowerCamelCase : Union[str, Any] = self.decode(SCREAMING_SNAKE_CASE__).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__)
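# forward above is the plain VQ-VAE round trip: encode to latents, snap each latent
# to its nearest codebook vector inside decode (force_not_quantize defaults to
# False), then decode back to pixel space.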
| 73 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
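# The seven recursive products above are Strassen's M1..M7; combining them as
# (t5 + t4 - t2 + t6), (t1 + t2), (t3 + t4) and (t1 + t5 - t3 - t7) rebuilds the
# four result quadrants with 7 multiplications instead of 8 per level, giving
# O(n^log2(7)) ~ O(n^2.81) overall.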
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
    # Adding zeros to the matrices so that the arrays' dimensions are the same and
    # also a power of 2
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 73 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a =TypeVar("""T""")
class A_ ( Generic[T] ):
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : T):
__lowerCamelCase : List[Any] = data
__lowerCamelCase : Node[T] | None = None
def __str__( self : Any):
return F"{self.data}"
class A_ ( Generic[T] ):
def __init__( self : Optional[Any]):
__lowerCamelCase : Node[T] | None = None
def __iter__( self : int):
__lowerCamelCase : List[str] = self.top
while node:
yield node.data
__lowerCamelCase : Optional[int] = node.next
def __str__( self : List[Any]):
return "->".join([str(SCREAMING_SNAKE_CASE__) for item in self])
def __len__( self : List[str]):
return len(tuple(iter(self)))
def lowerCAmelCase ( self : int):
return self.top is None
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : T):
__lowerCamelCase : Dict = Node(SCREAMING_SNAKE_CASE__)
if not self.is_empty():
__lowerCamelCase : int = self.top
__lowerCamelCase : Union[str, Any] = node
def lowerCAmelCase ( self : Tuple):
if self.is_empty():
raise IndexError('pop from empty stack')
assert isinstance(self.top ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = self.top
__lowerCamelCase : List[str] = self.top.next
return pop_node.data
def lowerCAmelCase ( self : Union[str, Any]):
if self.is_empty():
raise IndexError('peek from empty stack')
assert self.top is not None
return self.top.data
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[int] = None
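# Sketch of intended usage of the linked-list stack above (not executed here):
#   stack = A_[int]()
#   stack.push(1); stack.push(2)
#   assert stack.pop() == 2 and stack.peek() == 1 and len(stack) == 1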
if __name__ == "__main__":
from doctest import testmod
testmod()
| 73 |
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
__lowerCamelCase : Tuple = point_y / 4 / point_x
__lowerCamelCase : Tuple = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCamelCase : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowerCamelCase : Any = outgoing_gradient**2 + 4
__lowerCamelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCamelCase : str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
__lowerCamelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCamelCase : Optional[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCamelCase : Optional[Any] = x_minus if isclose(lowerCamelCase__ , lowerCamelCase__ ) else x_plus
__lowerCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
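# Geometry behind next_point: the ellipse 4x^2 + y^2 = 100 has tangent slope
# -4x/y, so the inward normal has gradient y / (4x). The two trigonometric terms
# computed first are sin and cos of twice the normal's angle, and the outgoing
# gradient is the incoming gradient reflected about that normal.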
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1.4 , lowerCamelCase__ = -9.6 ) -> int:
__lowerCamelCase : int = 0
__lowerCamelCase : float = first_x_coord
__lowerCamelCase : float = first_y_coord
__lowerCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = next_point(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Dict=3_2 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : Dict=[1_0, 2_0, 3_0, 4_0] ,SCREAMING_SNAKE_CASE__ : List[Any]=[1, 1, 2, 1] ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]="relu" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=None ,):
__lowerCamelCase : str = parent
__lowerCamelCase : List[str] = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : int = num_channels
__lowerCamelCase : Optional[int] = embeddings_size
__lowerCamelCase : List[Any] = hidden_sizes
__lowerCamelCase : int = depths
__lowerCamelCase : Any = is_training
__lowerCamelCase : Any = use_labels
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : List[Any] = num_labels
__lowerCamelCase : List[str] = scope
__lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : str):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Dict = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : str = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase : List[Any] = config_and_inputs
__lowerCamelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : str = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : List[str] = False
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : List[Any] = FlaxRegNetModelTester(self)
__lowerCamelCase : str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Union[str, Any]):
return
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
@unittest.skip(reason='RegNet does not use inputs_embeds')
def lowerCAmelCase ( self : List[str]):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def lowerCAmelCase ( self : Dict):
pass
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[Any] = [*signature.parameters.keys()]
__lowerCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str]):
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,expected_num_stages + 1)
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE__)
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
return model(pixel_values=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
with self.subTest('JIT Enabled'):
__lowerCamelCase : str = model_jitted(**SCREAMING_SNAKE_CASE__).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
__lowerCamelCase : Dict = model_jitted(**SCREAMING_SNAKE_CASE__).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,len(SCREAMING_SNAKE_CASE__))
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
self.assertEqual(jitted_output.shape ,output.shape)
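# The JIT test above runs the model once under jax.jit and once with jit disabled,
# then checks both paths return the same number of outputs with identical shapes.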
def SCREAMING_SNAKE_CASE__ ( ) -> Any:
__lowerCamelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : Optional[Any]):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
__lowerCamelCase : int = self.default_image_processor
__lowerCamelCase : Optional[Any] = prepare_img()
__lowerCamelCase : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='np')
__lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : Optional[Any] = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a ="""▁"""
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
__lowerCamelCase : Dict = (
AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__ ,normalized=SCREAMING_SNAKE_CASE__)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else mask_token
)
__lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = do_lower_case
__lowerCamelCase : Union[str, Any] = remove_space
__lowerCamelCase : Tuple = keep_accents
__lowerCamelCase : Dict = vocab_file
__lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : Optional[Any]):
return len(self.sp_model)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Tuple = None
return state
def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
if self.remove_space:
__lowerCamelCase : Dict = ' '.join(inputs.strip().split())
else:
__lowerCamelCase : Optional[Any] = inputs
__lowerCamelCase : Tuple = outputs.replace('``' ,'"').replace('\'\'' ,'"')
if not self.keep_accents:
__lowerCamelCase : List[str] = unicodedata.normalize('NFKD' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = ''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__)])
if self.do_lower_case:
__lowerCamelCase : Optional[Any] = outputs.lower()
return outputs
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = []
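        # SentencePiece may glue a trailing comma onto digits (e.g. "9,9" as one
        # piece); the loop below re-splits such pieces so digits and punctuation
        # tokenize consistently.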
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__lowerCamelCase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
__lowerCamelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(SCREAMING_SNAKE_CASE__)
else:
new_pieces.append(SCREAMING_SNAKE_CASE__)
return new_pieces
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Any):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : int = ''
__lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Any = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string.strip()
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Tuple = [self.sep_token_id]
__lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
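# _LazyModule replaces this module with a proxy that performs the torch/TF imports
# declared in _import_structure only when an attribute is first accessed, so
# importing the package stays cheap when just the config or tokenizer is needed.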
| 73 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__lowerCamelCase : int = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
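# Worked example: discount_rate=0.1 with cash_flows=[-100, 60, 60] gives
# -100 + 60/1.1 + 60/1.21 ~ -100 + 54.55 + 49.59, so the function returns 4.13
# (the flow at index 0 is discounted by (1 + r)**0, i.e. treated as occurring now).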
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a =None
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
a ={
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
a ={
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
a ="""▁"""
# Segments (not really needed)
a =0
a =1
a =2
a =3
a =4
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = '''left'''
_UpperCAmelCase : Dict = XLNetTokenizer
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=False ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" ,SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" ,SCREAMING_SNAKE_CASE__ : int="<pad>" ,SCREAMING_SNAKE_CASE__ : Tuple="<cls>" ,SCREAMING_SNAKE_CASE__ : Tuple="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=["<eop>", "<eod>"] ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
        # The mask token behaves like a normal word, i.e. it includes the space before it
__lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ ,tokenizer_file=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,additional_special_tokens=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = 3
__lowerCamelCase : Tuple = do_lower_case
__lowerCamelCase : Any = remove_space
__lowerCamelCase : Tuple = keep_accents
__lowerCamelCase : List[str] = vocab_file
__lowerCamelCase : List[Any] = False if not self.vocab_file else True
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Any = [self.sep_token_id]
__lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : List[Any] = [self.sep_token_id]
__lowerCamelCase : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
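    # Unlike BERT, XLNet puts its special tokens at the end: a single sequence
    # is encoded as `tokens <sep> <cls>` and a pair as
    # `tokens_a <sep> tokens_b <sep> <cls>`; the <cls> position gets its own
    # segment id (the [2] above), hence the trailing cls_segment_id.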
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
| 73 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = '''mask2former'''
_UpperCAmelCase : Dict = ['''swin''']
_UpperCAmelCase : Optional[int] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Dict] = None ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 2_5_6 ,SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ,SCREAMING_SNAKE_CASE__ : str = "relu" ,SCREAMING_SNAKE_CASE__ : int = 6 ,SCREAMING_SNAKE_CASE__ : int = 1_0 ,SCREAMING_SNAKE_CASE__ : int = 8 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : int = 4 ,SCREAMING_SNAKE_CASE__ : int = 2_5_5 ,SCREAMING_SNAKE_CASE__ : int = 1_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 2.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : float = 5.0 ,SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 ,SCREAMING_SNAKE_CASE__ : float = 3.0 ,SCREAMING_SNAKE_CASE__ : float = 0.75 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : float = 1.0 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] ,SCREAMING_SNAKE_CASE__ : bool = None ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
__lowerCamelCase : Optional[Any] = CONFIG_MAPPING['swin'](
image_size=2_2_4 ,in_channels=3 ,patch_size=4 ,embed_dim=9_6 ,depths=[2, 2, 1_8, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=SCREAMING_SNAKE_CASE__ ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Union[str, Any] = backbone_config.pop('model_type')
__lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported)}")
__lowerCamelCase : Dict = backbone_config
__lowerCamelCase : int = feature_size
__lowerCamelCase : List[str] = mask_feature_size
__lowerCamelCase : int = hidden_dim
__lowerCamelCase : str = encoder_feedforward_dim
__lowerCamelCase : Optional[int] = activation_function
__lowerCamelCase : int = encoder_layers
__lowerCamelCase : List[Any] = decoder_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Tuple = dropout
__lowerCamelCase : Dict = dim_feedforward
__lowerCamelCase : Union[str, Any] = pre_norm
__lowerCamelCase : List[str] = enforce_input_projection
__lowerCamelCase : Optional[int] = common_stride
__lowerCamelCase : Dict = ignore_value
__lowerCamelCase : Optional[Any] = num_queries
__lowerCamelCase : int = no_object_weight
__lowerCamelCase : Optional[Any] = class_weight
__lowerCamelCase : str = mask_weight
__lowerCamelCase : List[str] = dice_weight
__lowerCamelCase : Dict = train_num_points
__lowerCamelCase : Optional[int] = oversample_ratio
__lowerCamelCase : Optional[Any] = importance_sample_ratio
__lowerCamelCase : List[Any] = init_std
__lowerCamelCase : Tuple = init_xavier_std
__lowerCamelCase : Union[str, Any] = use_auxiliary_loss
__lowerCamelCase : List[Any] = feature_strides
__lowerCamelCase : Any = output_auxiliary_logits
__lowerCamelCase : List[Any] = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__)
@classmethod
def lowerCAmelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,**SCREAMING_SNAKE_CASE__ : Tuple):
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__)
__lowerCamelCase : List[Any] = self.backbone_config.to_dict()
__lowerCamelCase : Union[str, Any] = self.__class__.model_type
return output
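    # The constructor falls back to the default Swin backbone built above when
    # no backbone_config is passed, and the final method serializes the config
    # with the nested backbone config expanded to a plain dict so the result
    # stays JSON-serializable.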
| 73 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a =get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : int = AlbertTokenizer
_UpperCAmelCase : Any = AlbertTokenizerFast
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : List[Any] = True
def lowerCAmelCase ( self : Optional[int]):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : Tuple = AlbertTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : Union[str, Any] = 'this is a test'
__lowerCamelCase : Optional[Any] = 'this is a test'
return input_text, output_text
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[Any] = '<pad>'
__lowerCamelCase : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'<pad>')
self.assertEqual(vocab_keys[1] ,'<unk>')
self.assertEqual(vocab_keys[-1] ,'▁eloquent')
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,3_0_0_0_0)
def lowerCAmelCase ( self : Tuple):
self.assertEqual(self.get_tokenizer().vocab_size ,3_0_0_0_0)
def lowerCAmelCase ( self : List[str]):
if not self.test_rust_tokenizer:
return
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : Dict = self.get_rust_tokenizer()
__lowerCamelCase : int = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_rust_tokenizer()
__lowerCamelCase : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : List[Any] = AlbertTokenizer(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = tokenizer.tokenize('This is a test')
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,['▁this', '▁is', '▁a', '▁test'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__) ,[4_8, 2_5, 2_1, 1_2_8_9])
__lowerCamelCase : str = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
SCREAMING_SNAKE_CASE__ ,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
__lowerCamelCase : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9])
__lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
self.assertListEqual(
SCREAMING_SNAKE_CASE__ ,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] ,)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[Any] = AlbertTokenizer(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = tokenizer.encode('sequence builders')
__lowerCamelCase : Tuple = tokenizer.encode('multi-sequence build')
__lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def lowerCAmelCase ( self : List[str]):
# fmt: off
__lowerCamelCase : Any = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name='albert-base-v2' ,revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' ,)
| 73 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
a ={
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a ="""ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> dict[str, int]:
__lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    return lowerCamelCase__[0]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
__lowerCamelCase : List[str] = get_letter_count(lowerCamelCase__ )
__lowerCamelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
__lowerCamelCase : dict[int, str] = {}
for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
__lowerCamelCase : Optional[Any] = ''.join(freq_to_letter[freq] )
__lowerCamelCase : int = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=lambda pair: pair[0] , reverse=True )
__lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : str = get_frequency_order(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
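# The score counts how many of the six most common letters ("ETAOIN") and the
# six least common letters ("VKJXQZ") in the message's frequency order agree
# with standard English frequency order, so it ranges from 0 to 12.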
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
a =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : Tuple = WavaVecaForSequenceClassification.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__ )
__lowerCamelCase : str = downstream_dict['projector.weight']
__lowerCamelCase : Optional[int] = downstream_dict['projector.bias']
__lowerCamelCase : Union[str, Any] = downstream_dict['model.post_net.linear.weight']
__lowerCamelCase : Optional[Any] = downstream_dict['model.post_net.linear.bias']
return model
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = WavaVecaForAudioFrameClassification.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__ )
__lowerCamelCase : List[str] = downstream_dict['model.linear.weight']
__lowerCamelCase : Dict = downstream_dict['model.linear.bias']
return model
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : List[str] = WavaVecaForXVector.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__ )
__lowerCamelCase : Optional[int] = downstream_dict['connector.weight']
__lowerCamelCase : int = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowerCamelCase : Union[str, Any] = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__lowerCamelCase : Dict = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__lowerCamelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
__lowerCamelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
__lowerCamelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
__lowerCamelCase : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
__lowerCamelCase : int = downstream_dict['objective.W']
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : Any = torch.load(lowerCamelCase__ , map_location='cpu' )
__lowerCamelCase : Union[str, Any] = checkpoint['Downstream']
__lowerCamelCase : Dict = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
__lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
__lowerCamelCase : List[Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__lowerCamelCase : Tuple = convert_classification(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
elif arch.endswith('ForAudioFrameClassification' ):
__lowerCamelCase : Optional[int] = convert_diarization(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
elif arch.endswith('ForXVector' ):
__lowerCamelCase : str = convert_xvector(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__lowerCamelCase : List[str] = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(lowerCamelCase__ )
hf_model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
a =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
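    # Example invocation (script name and file paths are illustrative, not
    # taken from the source):
    #   python convert_s3prl_checkpoint.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./classifier_config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted_model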
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[list, list, list, list]:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Tuple = len(lowerCamelCase__ )
__lowerCamelCase : List[Any] = matrix_length // 2
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : str = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Dict = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Optional[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[int, int]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase : str = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : List[str] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Tuple = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> list:
if matrix_dimensions(lowerCamelCase__ )[1] != matrix_dimensions(lowerCamelCase__ )[0]:
__lowerCamelCase : Any = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(lowerCamelCase__ )
__lowerCamelCase : str = matrix_dimensions(lowerCamelCase__ )
__lowerCamelCase : List[str] = matrix_dimensions(lowerCamelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : str = max(*lowerCamelCase__ , *lowerCamelCase__ )
__lowerCamelCase : List[str] = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase__ ) ) ) )
__lowerCamelCase : Any = matrixa
__lowerCamelCase : int = matrixa
    # Pad the matrices with zeros so that both arrays have the same dimensions
    # and the size is a power of 2
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , lowerCamelCase__ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
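# Strassen's trick: the seven recursive calls above replace the eight block
# products a naive divide-and-conquer would need, giving O(n^log2(7)), roughly
# O(n^2.807), multiplications instead of O(n^3). The wrapper pads both inputs
# with zeros up to the next power of two so every recursive split is even,
# then strips the padding before returning.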
if __name__ == "__main__":
a =[
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a =[[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 73 |
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
for i in range(0 , lowerCamelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
for i in range(lowerCamelCase__ , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCamelCase__ ) # upper half
reverse_floyd(lowerCamelCase__ ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
a =1
while K:
a =int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a =int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 73 | 1 |