| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a random channels-first image, moved to channels-last for PIL
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # logits over character, BPE and WordPiece vocabularies (38, 50257, 30522)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 80 |
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 80 | 1 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # use CLIPSeg to predict a segmentation mask for `text` on the input image
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 80 |
import math


def sieve(n: int) -> list[int]:
    """Return all primes up to (and including) n, using a segmented sieve."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` that falls inside the [low, high] window
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
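
# A quick sanity check (added for illustration, not part of the original
# script): the segmented sieve should agree with the known primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]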
| 80 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 80 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 80 | 1 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at `root` satisfies the BST property."""

    # Validation: every node must be a TreeNode whose data is float-like
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
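
    # A small usage sketch (added for illustration, not part of the original
    # module): the second tree violates the BST property, since 5.0 sits in a
    # left subtree bounded above by 2.0.
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))
    print(is_binary_search_tree(valid))    # True
    print(is_binary_search_tree(invalid))  # False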
| 80 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency lists) contains a cycle."""
    # Keep track of all visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
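
    # A minimal usage sketch (added for illustration; these graphs are
    # assumptions, not part of the original module): adjacency lists keyed by node.
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1, 2], 1: [2], 2: []}
    print(check_cycle(cyclic))   # True: the edge 2 -> 0 is a back edge
    print(check_cycle(acyclic))  # False: no back edge is ever found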
| 80 | 1 |
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 80 |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two arrays' combined, sorted elements."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
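    # A quick worked example (added for illustration, not part of the original
    # script): merging [1, 3] with [2] gives [1, 2, 3], whose median is 2.
    assert median_of_two_arrays([1, 3], [2]) == 2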
| 80 | 1 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name


def read_in_q_k_v(state_dict, config):
    # note: the target key names below are reconstructed from the usual DPT layout
    # (attention.attention.{query,key,value}); the originals were lost in this dump
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
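
# A minimal usage sketch (added for illustration; assumes the class is exported
# from `transformers` as `MaskFormerSwinConfig`):
#
#     from transformers import MaskFormerSwinConfig
#     config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     print(config.hidden_size)  # 768 = 96 * 2**3 for the default embed_dim/depths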
| 80 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 80 | 1 |
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 80 |
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
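
# A small usage sketch (added for illustration, not part of the original
# module): 1s are land, 0s are water, and diagonally adjacent cells belong to
# the same island.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Matrix(5, 5, grid).count_islands())  # 5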
| 80 | 1 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive double-ended linear search: returns the index of key, or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
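
    # A worked example (added for illustration, not part of the original
    # script): the search walks inward from both ends, so 8 is found at index 3
    # on the third level of recursion, and a missing key returns -1.
    assert search([1, 2, 4, 8, 16, 32], 8) == 3
    assert search([1, 2, 4, 8, 16, 32], 7) == -1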
| 80 |
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
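# The qkv-splitting step of the conversion above in isolation: timm stores the
# query/key/value projections as one fused (3 * hidden, hidden) matrix, which
# the script slices into three per-layer projections. A small self-contained
# sketch of that slicing with an illustrative hidden size:
import torch as _torch

_hidden_size = 8  # illustrative; DPT-large uses 1024
_fused_weight = _torch.randn(3 * _hidden_size, _hidden_size)
_query_w = _fused_weight[:_hidden_size, :]
_key_w = _fused_weight[_hidden_size : 2 * _hidden_size, :]
_value_w = _fused_weight[-_hidden_size:, :]
assert _torch.equal(_torch.cat([_query_w, _key_w, _value_w], dim=0), _fused_weight)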
| 80 | 1 |
def lowerCamelCase__ ( A__ : int = 10 , A__ : int = 1000 , A__ : bool = True ):
'''simple docstring'''
assert (
isinstance(A__ , A__ )
and isinstance(A__ , A__ )
and isinstance(A__ , A__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError("""Invalid value for min_val or max_val (min_val must be <= max_val)""" )
return min_val if option else max_val
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return int((number_a + number_a) / 2 )
def lowerCamelCase__ ( A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
assert (
isinstance(A__ , A__ ) and isinstance(A__ , A__ ) and isinstance(A__ , A__ )
    ), 'argument values must be of type "int"'
if lower > higher:
        raise ValueError("""argument values for lower and higher must satisfy lower < higher""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(A__ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
__lowerCamelCase = lower
__lowerCamelCase = higher
__lowerCamelCase = []
while True:
__lowerCamelCase = get_avg(A__ , A__ )
last_numbers.append(A__ )
if answer(A__ ) == "low":
__lowerCamelCase = number
elif answer(A__ ) == "high":
__lowerCamelCase = number
else:
break
print(f'guess the number : {last_numbers[-1]}' )
print(f'details : {last_numbers!s}' )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter lower value : """ ).strip() )
__lowerCamelCase = int(input("""Enter high value : """ ).strip() )
__lowerCamelCase = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(A__ , A__ , A__ )
if __name__ == "__main__":
main()
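# The interactive loop above is plain bisection; a non-interactive sketch that
# replays the same converge-by-midpoint logic, assuming the original's
# precondition lower < to_guess < higher so the loop always terminates.
def bisect_guess(lower: int, higher: int, to_guess: int) -> int:
    while True:
        guess = (lower + higher) // 2
        if guess > to_guess:
            higher = guess
        elif guess < to_guess:
            lower = guess
        else:
            return guess

assert bisect_guess(1, 1000, 271) == 271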
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCAmelCase_ = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCAmelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase__:
def __init__( self: List[Any] ):
__lowerCamelCase = WATERMARK_BITS
__lowerCamelCase = WatermarkEncoder()
self.encoder.set_watermark("""bits""" , self.watermark )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
__lowerCamelCase = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowerCamelCase = [self.encoder.encode(UpperCamelCase_ , """dwtDct""" ) for image in images]
__lowerCamelCase = torch.from_numpy(np.array(UpperCamelCase_ ) ).permute(0 , 3 , 1 , 2 )
__lowerCamelCase = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 )
return images
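# The tensor plumbing around the encoder in isolation: diffusers images live in
# [-1, 1] as NCHW float tensors, imwatermark wants NHWC arrays in [0, 255], and
# the class above converts there and back. A sketch of that round trip alone,
# omitting the actual imwatermark encode call:
import torch as _torch

_images = _torch.rand(1, 3, 256, 256) * 2 - 1                       # NCHW in [-1, 1]
_arrays = (255 * (_images / 2 + 0.5)).permute(0, 2, 3, 1).numpy()   # NHWC in [0, 255]
_back = _torch.from_numpy(_arrays).permute(0, 3, 1, 2)
_back = _torch.clamp(2 * (_back / 255 - 0.5), min=-1.0, max=1.0)
assert _torch.allclose(_back, _images, atol=1e-5)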
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
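# A sanity sketch instantiating the config above with its defaults and reading
# back a few of the wired-through fields; the asserted values are the BERT-base
# defaults visible in the signature above, not a test of any checkpoint.
from transformers import BertConfig

_config = BertConfig()
assert _config.vocab_size == 30_522
assert _config.hidden_size == 768
assert _config.num_hidden_layers == _config.num_attention_heads == 12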
| 80 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCAmelCase_ = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: bool , UpperCamelCase_: str = None , UpperCamelCase_: list = None ):
__lowerCamelCase = None
__lowerCamelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__lowerCamelCase = os.path.abspath("""examples""" )
for item in os.listdir(UpperCamelCase_ ):
if item not in EXCLUDE_EXAMPLES:
__lowerCamelCase = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCamelCase_ , feature_script=UpperCamelCase_ , tested_section="""main()""" if parser_only else """training_function()""" , ):
__lowerCamelCase = compare_against_test(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = """\n""".join(UpperCamelCase_ )
if special_strings is not None:
for string in special_strings:
__lowerCamelCase = diff.replace(UpperCamelCase_ , """""" )
self.assertEqual(UpperCamelCase_ , """""" )
def lowerCAmelCase__ ( self: str ):
self.one_complete_example("""complete_nlp_example.py""" , UpperCamelCase_ )
self.one_complete_example("""complete_nlp_example.py""" , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__lowerCamelCase = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.one_complete_example("""complete_cv_example.py""" , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'})
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = False
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
super().setUpClass()
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__lowerCamelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def lowerCAmelCase__ ( cls: int ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ )
self.assertNotIn("""epoch 0:""" , UpperCamelCase_ )
self.assertIn("""epoch 1:""" , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__lowerCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ )
if torch.cuda.is_available():
__lowerCamelCase = torch.cuda.device_count()
else:
__lowerCamelCase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , UpperCamelCase_ )
self.assertIn("""epoch 1:""" , UpperCamelCase_ )
else:
self.assertIn("""epoch 0:""" , UpperCamelCase_ )
self.assertIn("""epoch 1:""" , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__lowerCamelCase = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase_ )
__lowerCamelCase = re.findall("""({.+})""" , UpperCamelCase_ )
__lowerCamelCase = [r for r in results if """accuracy""" in r][-1]
__lowerCamelCase = ast.literal_eval(UpperCamelCase_ )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCAmelCase__ ( self: Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
__lowerCamelCase = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """tracking""" ) ) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
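# The command shape these tests exercise, spelled out: `accelerate launch` with
# a shared config file plus per-script arguments. Paths here are illustrative,
# not the temporary directories the test class creates.
_launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
_testargs = [
    "examples/by_feature/checkpointing.py",
    "--checkpointing_steps", "epoch",
    "--output_dir", "/tmp/accelerate_demo",
]
print(" ".join(_launch_args + _testargs))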
| 80 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
    # the smallest integer greater than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
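# The quadratic estimate in the solver, checked on one case: an a-by-b grid
# contains T(a) * T(b) rectangles with T(n) = n(n+1)/2, so solving
# T(b) = target / T(a) for b gives the formula used above.
from math import sqrt as _sqrt

def _triangle(n: int) -> int:
    return n * (n + 1) // 2

_target, _a = 2_000_000, 36
_b_estimate = (-1 + _sqrt(1 + 8 * _target / _triangle(_a))) / 2
# T(36) * T(77) = 666 * 3003 = 1_999_998, just two away from the target
assert int(_b_estimate) == 77
assert _triangle(36) * _triangle(77) == 1_999_998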
| 80 | 1 |
def lowerCamelCase__ ( A__ : int = 1000 ):
'''simple docstring'''
__lowerCamelCase = 2**power
__lowerCamelCase = str(A__ )
__lowerCamelCase = list(A__ )
__lowerCamelCase = 0
for i in list_num:
sum_of_num += int(A__ )
return sum_of_num
if __name__ == "__main__":
UpperCAmelCase_ = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
UpperCAmelCase_ = solution(power)
print('Sum of the digits is: ', result)
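# A one-line check of the digit-sum routine's result for the classic case:
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert sum(int(digit) for digit in str(2**15)) == 26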
| 80 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
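# The skip-connection bookkeeping the up blocks above rely on, in isolation:
# down blocks push residual states onto a tuple, up blocks pop from the end
# and concatenate along the channel axis (the last axis in Flax's NHWC
# layout). Shapes here are illustrative.
import jax.numpy as jnp

_hidden = jnp.ones((1, 8, 8, 320))
_res_stack = (jnp.ones((1, 8, 8, 320)), jnp.ones((1, 8, 8, 640)))

_res = _res_stack[-1]            # pop the most recent residual
_res_stack = _res_stack[:-1]
_merged = jnp.concatenate((_hidden, _res), axis=-1)
assert _merged.shape == (1, 8, 8, 960)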
| 80 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] ):
'''simple docstring'''
return len(set(A__ ) ) == len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
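# The set-versus-length trick above, reconstructed with a descriptive name:
# duplicates collapse in the set, so equal lengths mean all items are unique.
def all_unique(values: list[int]) -> bool:
    return len(set(values)) == len(values)

assert all_unique([1, 2, 3]) and not all_unique([1, 2, 2])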
| 80 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
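# The `make_linear_from_emb` idea above in isolation: build a bias-free linear
# layer whose weight is the embedding matrix, so logits are dot products
# against the token embeddings (weight tying). Sizes here are illustrative.
import torch as _torch
from torch import nn as _nn

_emb = _nn.Embedding(50, 16)
_vocab_size, _emb_size = _emb.weight.shape
_lm_head = _nn.Linear(_emb_size, _vocab_size, bias=False)
_lm_head.weight.data = _emb.weight.data
_hidden = _torch.randn(1, 16)
assert _torch.allclose(_lm_head(_hidden), _hidden @ _emb.weight.T)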
| 80 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
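# The label-masking step above in isolation: positions holding the pad token
# are replaced with -100 so PyTorch's cross-entropy ignores them in the loss.
_pad_token_id = 0
_batch_labels = [[5, 7, 9, 0, 0], [4, 0, 0, 0, 0]]
_masked = [
    [-100 if token == _pad_token_id else token for token in labels]
    for labels in _batch_labels
]
assert _masked == [[5, 7, 9, -100, -100], [4, -100, -100, -100, -100]]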
| 80 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 1 |
import torch
def lowerCamelCase__ ( ):
'''simple docstring'''
if torch.cuda.is_available():
__lowerCamelCase = torch.cuda.device_count()
else:
__lowerCamelCase = 0
print(f'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 80 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
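# The sieve above on a tiny input, checked by hand: primes below 10 are
# 2, 3, 5, 7 and they sum to 17. A readable reconstruction of the same
# Sieve of Eratosthenes, with descriptive names in place of the masked ones.
def sum_primes_below(n: int) -> int:
    composite = [False] * (n + 1)
    total = 0
    for i in range(2, n):
        if not composite[i]:
            total += i
            for j in range(i * i, n + 1, i):
                composite[j] = True
    return total

assert sum_primes_below(10) == 17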
| 80 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = 'sew'
def __init__( self: Union[str, Any] , UpperCamelCase_: int=32 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Optional[Any]=30_72 , UpperCamelCase_: Dict=2 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: Tuple=1E-5 , UpperCamelCase_: Optional[int]="group" , UpperCamelCase_: str="gelu" , UpperCamelCase_: Optional[int]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_: List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_: Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_: List[str]=False , UpperCamelCase_: Optional[int]=1_28 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=True , UpperCamelCase_: Tuple=0.05 , UpperCamelCase_: List[Any]=10 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: List[Any]=10 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Union[str, Any]="mean" , UpperCamelCase_: Dict=False , UpperCamelCase_: str=False , UpperCamelCase_: str=2_56 , UpperCamelCase_: str=0 , UpperCamelCase_: int=1 , UpperCamelCase_: Union[str, Any]=2 , **UpperCamelCase_: str , ):
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 80 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
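# default (untrained) schedule: a sinusoidal beta curve over the steps; the
# discrete timesteps are then recovered below from the (beta, alpha) angle.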
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
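# The branches below are the order-1..4 Adams-Bashforth (linear multistep)
# formulas; the effective order grows with the number of cached outputs in self.ets.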
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
| 80 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : Dict = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : str = frozenset([])
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 80 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCAmelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCAmelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
UpperCAmelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = 'vit_msn'
def __init__( self: Union[str, Any] , UpperCamelCase_: Any=7_68 , UpperCamelCase_: str=12 , UpperCamelCase_: Dict=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=1E-06 , UpperCamelCase_: Union[str, Any]=2_24 , UpperCamelCase_: int=16 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Dict=True , **UpperCamelCase_: Dict , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
| 80 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 1 |
def lowerCamelCase__ ( A__ : float ):
'''simple docstring'''
return 10 - x * x
def lowerCamelCase__ ( A__ : float , A__ : float ):
'''simple docstring'''
if equation(A__ ) * equation(A__ ) >= 0:
raise ValueError("""Wrong space!""" )
__lowerCamelCase = a
while (b - a) >= 0.01:
# Find middle point
__lowerCamelCase = (a + b) / 2
# Check if middle point is root
if equation(A__ ) == 0.0:
break
# Decide the side to repeat the steps
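# (intended update: b = c when the sign change lies in [a, c], else a = c; the
# restyling collapsed both assignments below to one placeholder name)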
if equation(A__ ) * equation(A__ ) < 0:
__lowerCamelCase = c
else:
__lowerCamelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
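# With equation(x) = 10 - x*x the positive root is sqrt(10) ~ 3.1623, so both
# calls above should print a value within the 0.01 bracketing tolerance of it.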
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
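# Intended behavior (the restyling collapsed the two index parameters into one
# name): swap array[index1] and array[index2] whenever they violate the
# requested direction (1 = ascending, 0 = descending).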
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
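# Lazy-module pattern: heavy submodules are imported only on first attribute
# access, so importing the package stays cheap when torch/vision extras are absent.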
UpperCAmelCase_ = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['PerceiverFeatureExtractor']
UpperCAmelCase_ = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for Manhattan distance, 0 for Euclidean distance
UpperCAmelCase_ = 0
UpperCAmelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCAmelCase_ = tuple[int, int]
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: Node | None , ):
__lowerCamelCase = pos_x
__lowerCamelCase = pos_y
__lowerCamelCase = (pos_y, pos_x)
__lowerCamelCase = goal_x
__lowerCamelCase = goal_y
__lowerCamelCase = g_cost
__lowerCamelCase = parent
__lowerCamelCase = self.calculate_heuristic()
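# f = g + h: path cost so far plus the heuristic estimate to the goal; open
# nodes are later popped in ascending f via __lt__.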
__lowerCamelCase = self.g_cost + self.h_cost
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.pos_x - self.goal_x
__lowerCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCamelCase_ ) + abs(UpperCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: str , UpperCamelCase_: Node ):
return self.f_cost < other.f_cost
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: TPosition , UpperCamelCase_: TPosition ):
__lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase_ )
__lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , UpperCamelCase_ )
__lowerCamelCase = [self.start]
__lowerCamelCase = []
__lowerCamelCase = False
def lowerCAmelCase__ ( self: List[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCamelCase_ )
self.closed_nodes.append(UpperCamelCase_ )
__lowerCamelCase = self.get_successors(UpperCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase_ )
else:
# retrieve the best current path
__lowerCamelCase = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase_ )
else:
self.open_nodes.append(UpperCamelCase_ )
return [self.start.pos]
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Node ):
__lowerCamelCase = []
for action in delta:
__lowerCamelCase = parent.pos_x + action[1]
__lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase_ , UpperCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase_ , ) )
return successors
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Node | None ):
__lowerCamelCase = node
__lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCamelCase = current_node.parent
path.reverse()
return path
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: TPosition , UpperCamelCase_: TPosition ):
__lowerCamelCase = AStar(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = AStar(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = False
def lowerCAmelCase__ ( self: Dict ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCamelCase = self.fwd_astar.open_nodes.pop(0 )
__lowerCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCamelCase_ , UpperCamelCase_ )
self.fwd_astar.closed_nodes.append(UpperCamelCase_ )
self.bwd_astar.closed_nodes.append(UpperCamelCase_ )
__lowerCamelCase = current_bwd_node
__lowerCamelCase = current_fwd_node
__lowerCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCamelCase_ )
else:
# retrieve the best current path
__lowerCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(UpperCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCamelCase_ )
else:
astar.open_nodes.append(UpperCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Node , UpperCamelCase_: Node ):
__lowerCamelCase = self.fwd_astar.retrace_path(UpperCamelCase_ )
__lowerCamelCase = self.bwd_astar.retrace_path(UpperCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = AStar(init, goal)
UpperCAmelCase_ = a_star.search()
UpperCAmelCase_ = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = BidirectionalAStar(init, goal)
UpperCAmelCase_ = bd_a_star.search()
UpperCAmelCase_ = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 80 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
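# phase 1: a plain sieve up to sqrt(n) collects the base primes into in_prime.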
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
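# phase 2: sieve each segment [low, high] of width ~sqrt(n) using only the base primes.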
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
| 80 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
# TODO: is there an appropriate internal test set?
UpperCAmelCase__ : Tuple = 'ssube/stable-diffusion-x4-upscaler-onnx'
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int]=0 ):
__lowerCamelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCamelCase_ ) )
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
@property
def lowerCAmelCase__ ( self: Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """A fantasy landscape, trending on artstation"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase = init_image.resize((1_28, 1_28) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=UpperCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """A fantasy landscape, trending on artstation"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array(
[0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 80 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 1 |
from __future__ import annotations
import numpy as np
def lowerCamelCase__ ( A__ : list[float] ):
'''simple docstring'''
return np.maximum(0 , A__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 80 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
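# A minimal usage sketch with illustrative (non-source) names; after the
# restyling above, the entry point and the DFS helper share the name
# `lowerCamelCase__`, so `check_cycle` below is hypothetical:
# check_cycle({0: [1], 1: [2], 2: [0]}) -> True (0 -> 1 -> 2 -> 0 is a back edge)
# check_cycle({0: [1, 2], 1: [2], 2: []}) -> False (the graph is acyclic)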
| 80 | 1 |
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
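# a lamina with outer side o and hole side h uses o**2 - h**2 tiles; o and h
# must share parity so the border thickness is uniform (the % 2 adjustment below).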
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ):
'''simple docstring'''
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
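# e.g. merging [1, 3] and [2] gives [1, 2, 3] -> 2; merging [1, 2] and [3, 4]
# gives [1, 2, 3, 4] -> (2 + 3) / 2 = 2.5.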
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 80 | 1 |
from collections import deque
class lowerCamelCase__:
def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
__lowerCamelCase = process_name # process name
__lowerCamelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__lowerCamelCase = arrival_time
__lowerCamelCase = burst_time # remaining burst time
__lowerCamelCase = 0 # total time of the process wait in ready queue
__lowerCamelCase = 0 # time from arrival time to completion time
class lowerCamelCase__:
def __init__( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: list[int] , UpperCamelCase_: deque[Process] , UpperCamelCase_: int , ):
# total number of mlfq's queues
__lowerCamelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__lowerCamelCase = time_slices
# unfinished process is in this ready_queue
__lowerCamelCase = queue
# current time
__lowerCamelCase = current_time
# finished process is in this sequence queue
__lowerCamelCase = deque()
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: list[Process] ):
__lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: list[Process] ):
__lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: list[Process] ):
__lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: deque[Process] ):
return [q.burst_time for q in queue]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCAmelCase__ ( self: int , UpperCamelCase_: deque[Process] ):
__lowerCamelCase = deque() # sequence deque of finished process
while len(UpperCamelCase_ ) != 0:
__lowerCamelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__lowerCamelCase = 0
# set the process's turnaround time because it is finished
__lowerCamelCase = self.current_time - cp.arrival_time
# set the completion time
__lowerCamelCase = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: deque[Process] , UpperCamelCase_: int ):
__lowerCamelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCamelCase_ ) ):
__lowerCamelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__lowerCamelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__lowerCamelCase = 0
# set the finish time
__lowerCamelCase = self.current_time
# update the process' turnaround time because it is finished
__lowerCamelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCAmelCase__ ( self: List[str] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__lowerCamelCase, __lowerCamelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCAmelCase_ = Process('P1', 0, 53)
UpperCAmelCase_ = Process('P2', 0, 17)
UpperCAmelCase_ = Process('P3', 0, 68)
UpperCAmelCase_ = Process('P4', 0, 24)
UpperCAmelCase_ = 3
UpperCAmelCase_ = [17, 25]
UpperCAmelCase_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
UpperCAmelCase_ = Process('P1', 0, 53)
UpperCAmelCase_ = Process('P2', 0, 17)
UpperCAmelCase_ = Process('P3', 0, 68)
UpperCAmelCase_ = Process('P4', 0, 24)
UpperCAmelCase_ = 3
UpperCAmelCase_ = [17, 25]
UpperCAmelCase_ = deque([Pa, Pa, Pa, Pa])
UpperCAmelCase_ = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCAmelCase_ = mlfq.multi_level_feedback_queue()
    # print the total waiting times of processes (P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
    # print the completion times of processes (P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
    # print the total turnaround times of processes (P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
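    # A minimal follow-up sketch (not part of the original demo): aggregate statistics derived
    # from the per-process helpers above. `procs` is a hypothetical alias for the same four
    # (obfuscated) process handles used throughout the demo; everything else reuses existing names.
    procs = [Pa, Pa, Pa, Pa]
    avg_waiting = sum(MLFQ.calculate_waiting_time(mlfq, procs)) / len(procs)
    avg_turnaround = sum(MLFQ.calculate_turnaround_time(mlfq, procs)) / len(procs)
    print(f"""average waiting time:\t\t{avg_waiting}""")
    print(f"""average turnaround time:\t{avg_turnaround}""")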
| 80 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
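# A minimal standalone sketch, assuming the standard definitions behind the activations tested
# above: "gelu_python" is the exact erf-based GELU and "gelu_new" is the tanh approximation,
# which is why the tests expect the two to be close but not allclose. This reimplementation is
# illustrative only and is not imported from transformers.
import math

import torch

def exact_gelu(x: torch.Tensor) -> torch.Tensor:
    # exact GELU: x * Phi(x), where Phi is the standard normal CDF
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def tanh_gelu(x: torch.Tensor) -> torch.Tensor:
    # tanh approximation of GELU (the "gelu_new" flavor)
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

x = torch.tensor([-1.0, 0.0, 1.0])
print(exact_gelu(x), tanh_gelu(x))  # close, but not bitwise equal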
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = 'lxmert'
UpperCAmelCase__ : Any = {}
def __init__( self: Dict , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Tuple=7_68 , UpperCamelCase_: List[Any]=12 , UpperCamelCase_: Any=95_00 , UpperCamelCase_: Tuple=16_00 , UpperCamelCase_: Optional[int]=4_00 , UpperCamelCase_: Union[str, Any]=30_72 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=5_12 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: int=1E-12 , UpperCamelCase_: List[Any]=9 , UpperCamelCase_: Any=5 , UpperCamelCase_: Optional[Any]=5 , UpperCamelCase_: Optional[Any]=20_48 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Dict=6.67 , UpperCamelCase_: Dict=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: int=True , UpperCamelCase_: Dict=True , **UpperCamelCase_: Tuple , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = num_qa_labels
__lowerCamelCase = num_object_labels
__lowerCamelCase = num_attr_labels
__lowerCamelCase = l_layers
__lowerCamelCase = x_layers
__lowerCamelCase = r_layers
__lowerCamelCase = visual_feat_dim
__lowerCamelCase = visual_pos_dim
__lowerCamelCase = visual_loss_normalizer
__lowerCamelCase = task_matched
__lowerCamelCase = task_mask_lm
__lowerCamelCase = task_obj_predict
__lowerCamelCase = task_qa
__lowerCamelCase = visual_obj_loss
__lowerCamelCase = visual_attr_loss
__lowerCamelCase = visual_feat_loss
__lowerCamelCase = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
super().__init__(**UpperCamelCase_ )
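# Hedged usage sketch: `LxmertConfig` is the de-obfuscated name implied by model_type = 'lxmert'
# above. The per-modality layer counts set in the constructor surface as a dict; the attribute
# name `num_hidden_layers` is taken from the upstream implementation and is an assumption here.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}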
| 80 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
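# Side sketch of the label-masking step used in _map_to_encoder_decoder_inputs above, in
# isolation: label tokens equal to the pad id are replaced with -100, the default ignore_index
# of torch.nn.CrossEntropyLoss, so padding does not contribute to the seq2seq loss.
pad_token_id = 0  # illustrative value; the real id comes from the tokenizer
labels = [[5, 7, 9, pad_token_id, pad_token_id]]
masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
print(masked)  # [[5, 7, 9, -100, -100]]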
| 80 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCAmelCase_ = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
UpperCAmelCase_ = json.load(f)
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[Any] ):
return FSMTTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int ):
__lowerCamelCase = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
__lowerCamelCase = F'facebook/wmt19-{pair}'
__lowerCamelCase = self.get_tokenizer(UpperCamelCase_ )
__lowerCamelCase = self.get_model(UpperCamelCase_ )
__lowerCamelCase = bleu_data[pair]["""src"""]
__lowerCamelCase = bleu_data[pair]["""tgt"""]
__lowerCamelCase = tokenizer(UpperCamelCase_ , return_tensors="""pt""" , truncation=UpperCamelCase_ , padding="""longest""" ).to(UpperCamelCase_ )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__lowerCamelCase = tokenizer.batch_decode(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
__lowerCamelCase = calculate_bleu(UpperCamelCase_ , UpperCamelCase_ )
print(UpperCamelCase_ )
self.assertGreaterEqual(scores["""bleu"""] , UpperCamelCase_ )
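# `calculate_bleu` above is imported from the local test utils. A plausible minimal stand-in
# (an assumption, not the actual helper) wraps sacrebleu's corpus BLEU, which expects one list
# of hypotheses and a list of reference streams:
import sacrebleu

def calculate_bleu_sketch(output_lns, refs_lns):
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}

hyp = ["the quick brown fox jumps over the lazy dog"]
print(calculate_bleu_sketch(hyp, hyp))  # {'bleu': 100.0} for identical hypothesis and reference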
| 80 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
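# Usage sketch for the island counter above. The class and method names are obfuscated in this
# dump, so `Graph` and `count_islands` are hypothetical aliases; cells holding 1 are land,
# 0 is water, and connectivity is 8-directional per the neighbour offsets in the DFS.
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
# Graph(5, 5, grid).count_islands() would return 5 for this grid.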
| 80 | 1 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
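# Quick sanity checks for the segmented sieve: the primes up to 30, and a count that matches
# the prime-counting function (pi(100) == 25).
print(sieve(30))        # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(len(sieve(100)))  # 25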
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
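# Example invocation sketch for this conversion script. The flags come from the argparse
# definitions above; the filename is hypothetical:
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large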
| 80 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
UpperCAmelCase_ = 637_8137.0
UpperCAmelCase_ = 635_6752.31_4245
UpperCAmelCase_ = 6_378_137
def lowerCamelCase__ ( A__ : float , A__ : float , A__ : float , A__ : float ):
'''simple docstring'''
__lowerCamelCase = (AXIS_A - AXIS_B) / AXIS_A
__lowerCamelCase = atan((1 - flattening) * tan(radians(A__ ) ) )
__lowerCamelCase = atan((1 - flattening) * tan(radians(A__ ) ) )
__lowerCamelCase = radians(A__ )
__lowerCamelCase = radians(A__ )
# Equation
__lowerCamelCase = sin((phi_a - phi_a) / 2 )
__lowerCamelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__lowerCamelCase = sqrt(sin_sq_phi + (cos(A__ ) * cos(A__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
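    # Hedged sanity-check sketch for the constants above: the implied flattening matches the
    # WGS84 inverse flattening of ~298.2572, and one degree of meridian arc comes out to
    # roughly 111 km. `haversine_distance` is the assumed de-obfuscated name of the function.
    flattening_check = (637_8137.0 - 635_6752.31_4245) / 637_8137.0
    print(round(1 / flattening_check, 4))  # 298.2572
    # haversine_distance(0.0, 0.0, 1.0, 0.0)  # assumed name; would return roughly 111 km in metres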
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : bool , A__ : bool ):
'''simple docstring'''
def run_func(A__ : Optional[Any] ):
@wraps(A__ )
def run_in_eager_mode(*A__ : List[Any] , **A__ : Any ):
return func(*A__ , **A__ )
@wraps(A__ )
@tf.function(experimental_compile=A__ )
def run_in_graph_mode(*A__ : Dict , **A__ : Union[str, Any] ):
return func(*A__ , **A__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
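# Standalone sketch of the eager-vs-graph dispatch implemented by run_func above: the same
# callable either runs eagerly or is traced by tf.function (with optional XLA compilation,
# as the experimental_compile flag above requests). This snippet is illustrative only.
import tensorflow as tf

@tf.function  # traced into a graph, like run_in_graph_mode above
def graph_square(x):
    return x * x

print(graph_square(tf.constant(3.0)))  # tf.Tensor(9.0, shape=(), dtype=float32)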
def lowerCamelCase__ ( A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = random.Random()
__lowerCamelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : TensorFlowBenchmarkArguments
UpperCAmelCase__ : PretrainedConfig
UpperCAmelCase__ : str = "TensorFlow"
@property
def lowerCAmelCase__ ( self: Any ):
return tf.__version__
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
# initialize GPU on separate process
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowerCamelCase = self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowerCamelCase = self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowerCamelCase = self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__lowerCamelCase = self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
__lowerCamelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__lowerCamelCase = (
hasattr(UpperCamelCase_ , """architectures""" )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase = __import__("""transformers""" , fromlist=[model_class] )
__lowerCamelCase = getattr(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__lowerCamelCase = TF_MODEL_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
__lowerCamelCase = config.vocab_size if hasattr(UpperCamelCase_ , """vocab_size""" ) else config.encoder.vocab_size
__lowerCamelCase = random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , training=UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase_ , training=UpperCamelCase_ )
__lowerCamelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: int ):
__lowerCamelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__lowerCamelCase = (
hasattr(UpperCamelCase_ , """architectures""" )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase = __import__("""transformers""" , fromlist=[model_class] )
__lowerCamelCase = getattr(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__lowerCamelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
__lowerCamelCase = config.vocab_size if hasattr(UpperCamelCase_ , """vocab_size""" ) else config.encoder.vocab_size
__lowerCamelCase = random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__lowerCamelCase = model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
__lowerCamelCase = tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
__lowerCamelCase = tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
__lowerCamelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for TPU
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(UpperCamelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__lowerCamelCase = timeit.repeat(
UpperCamelCase_ , repeat=self.args.repeat , number=10 , )
return min(UpperCamelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Callable[[], None] ):
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
__lowerCamelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
__lowerCamelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
__lowerCamelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__lowerCamelCase = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase_ )
__lowerCamelCase = meminfo.used
__lowerCamelCase = Memory(UpperCamelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
__lowerCamelCase = None
else:
__lowerCamelCase = measure_peak_memory_cpu(UpperCamelCase_ )
__lowerCamelCase = Memory(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__lowerCamelCase = stop_memory_tracing(UpperCamelCase_ )
if memory is None:
__lowerCamelCase = summary.total
else:
__lowerCamelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
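# Hedged usage sketch: `BertConfig` is the de-obfuscated name implied by model_type = 'bert'
# above, and its defaults match the bert-base signature in the constructor (hidden size 768,
# 12 layers, 12 attention heads).
from transformers import BertConfig

config = BertConfig()
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 768 12 12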
| 80 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Tuple ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
UpperCAmelCase__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
UpperCAmelCase__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
UpperCAmelCase__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
UpperCAmelCase__ : str = field(metadata={'help': 'Should contain the data files for the task.'})
UpperCAmelCase__ : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase__ : bool = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , A__ )
# Set seed
set_seed(training_args.seed )
try:
__lowerCamelCase = processors[data_args.task_name]()
__lowerCamelCase = processor.get_labels()
__lowerCamelCase = len(A__ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
# Get datasets
__lowerCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__lowerCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(A__ : EvalPrediction ) -> Dict:
__lowerCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(A__ , p.label_ids )}
# Data collator
__lowerCamelCase = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__lowerCamelCase = Trainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCamelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCamelCase = trainer.evaluate()
__lowerCamelCase = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(A__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , A__ , A__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(A__ )
return results
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
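# Example invocation sketch. Flag names follow the dataclass fields above plus the standard
# TrainingArguments; the script filename and the 'swag' task are assumptions:
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --max_seq_length 128 \
#       --do_train --do_eval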
| 80 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
    # the smallest integer greater than or equal to b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
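# Background sketch for the search above: an a-by-b grid contains T(a) * T(b) rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number, because a rectangle is fixed by
# choosing two of the a + 1 vertical and two of the b + 1 horizontal grid lines.
def rectangles(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

print(rectangles(3, 2))  # 18, the worked example from Project Euler problem 85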
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
def lowerCamelCase__ ( A__ : int = 100 ):
'''simple docstring'''
__lowerCamelCase = n * (n + 1) * (2 * n + 1) / 6
__lowerCamelCase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
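    # Worked check: for n = 10 the square of the sum is 3025 and the sum of the squares is 385,
    # so the difference is 2640 (the example from Project Euler problem 6).
    print(f"""{solution(10) = }""")  # 2640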
| 80 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 1 |
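Every up block above repeats one move: pop the matching down-block activation off `res_hidden_states_tuple`, concatenate it with the current hidden states along the channel axis, and run the result through a resnet. Below is a minimal sketch of that pattern in plain JAX; the `resnets` argument is an assumed stand-in for the resnet modules (any list of callables works).

import jax.numpy as jnp

def up_block_step(hidden_states, res_hidden_states_tuple, resnets):
    # `res_hidden_states_tuple` holds the saved down-block activations,
    # deepest last; each resnet consumes exactly one of them.
    for resnet in resnets:
        res_hidden_states = res_hidden_states_tuple[-1]        # pop the deepest skip
        res_hidden_states_tuple = res_hidden_states_tuple[:-1]
        # NHWC layout, so channels are the last axis
        hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
        hidden_states = resnet(hidden_states)                  # real resnets project channels back down
    return hidden_states, res_hidden_states_tuple

# toy usage with identity "resnets" (so channels simply accumulate):
h = jnp.ones((1, 8, 8, 4))
skips = (jnp.ones((1, 8, 8, 4)),) * 2
h, skips = up_block_step(h, skips, [lambda x: x, lambda x: x])
print(h.shape)  # (1, 8, 8, 12)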
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Optional[Any]=10 , UpperCamelCase_: str=3 , UpperCamelCase_: Optional[int]=32 * 4 , UpperCamelCase_: Optional[Any]=32 * 6 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: int=32 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = mask_feature_size
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self: Dict ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Tuple=False ):
with torch.no_grad():
__lowerCamelCase = MaskFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ):
__lowerCamelCase = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(UpperCamelCase_: Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
__lowerCamelCase = model(
pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = MaskFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: str ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowerCAmelCase__ ( self: int ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowerCAmelCase__ ( self: List[str] ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: Any ):
pass
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCamelCase = MaskFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCamelCase_ ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(),
}
__lowerCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self: Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__lowerCamelCase = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
loss.backward()
def lowerCAmelCase__ ( self: List[str] ):
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__lowerCamelCase = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ = 1E-4
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCamelCase__( unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: str ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCamelCase_ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__lowerCamelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__lowerCamelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__lowerCamelCase = inputs["""pixel_values"""].to(UpperCamelCase_ )
__lowerCamelCase = [el.to(UpperCamelCase_ ) for el in inputs["""mask_labels"""]]
__lowerCamelCase = [el.to(UpperCamelCase_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 80 |
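For reference, a minimal inference sketch that follows the same pattern as the slow tests above; it assumes the public `facebook/maskformer-swin-small-coco` checkpoint and a local copy of the COCO fixture image, and prints the same output shapes the tests assert on.

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# class logits: (batch, num_queries, num_labels + 1); masks come out at 1/4 resolution
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)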
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 1 |
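The conversion above boils down to simple state-dict surgery: drop fairseq bookkeeping keys, rename the classification-head keys, then load the result into the Hugging Face module. Here is that surgery isolated into a self-contained sketch; the sample keys are illustrative.

import torch

def remove_ignore_keys(state_dict, ignore_keys):
    # drop bookkeeping entries that the HF model has no slot for
    for k in ignore_keys:
        state_dict.pop(k, None)

def rename_key(state_dict, old, new):
    # move the tensor under the name the HF module expects
    state_dict[new] = state_dict.pop(old)

sd = {
    "model.classification_heads.mnli.dense.weight": torch.zeros(3, 3),
    "_float_tensor": torch.zeros(1),
}
remove_ignore_keys(sd, ["_float_tensor"])
rename_key(sd, "model.classification_heads.mnli.dense.weight", "classification_head.dense.weight")
print(sorted(sd))  # ['classification_head.dense.weight']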
from maths.prime_check import is_prime
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
if not isinstance(A__ , A__ ):
__lowerCamelCase = f'Input value of [number={number}] must be an integer'
raise TypeError(A__ )
if is_prime(A__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
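Because the snippet above depends on `maths.prime_check`, a fully self-contained version is handy for quick experiments; this sketch bundles a trial-division primality test with the same twin-prime rule (return `number + 2` if both are prime, else `-1`).

def is_prime(n: int) -> bool:
    # simple trial division; enough for small inputs
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            return False
        f += 2
    return True

def twin_prime(number: int) -> int:
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1

assert twin_prime(3) == 5 and twin_prime(4) == -1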
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 1 |
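As a usage sketch mirroring the slow generation test above: load the `ctrl` checkpoint, encode the same prompt (where "Legal" acts as the control code), and generate greedily. The checkpoint is very large (roughly 1.6B parameters), so treat this as illustrative rather than something to run in CI.

import torch
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl").eval()

input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))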
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase_ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
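The `_LazyModule` indirection above defers the heavy torch/flax imports until a symbol is actually used. A minimal sketch of the mechanism, written without the transformers helpers, looks like this; `import_structure` maps submodule names to the attribute names each defines.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        # only reached when `name` is not yet an attribute of the module
        if name not in self._name_to_module:
            raise AttributeError(name)
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[name]}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

# usage sketch:
# LazyModule("pkg", {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]})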
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
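An equivalent sieve written with a byte array, same O(n log log n) idea, with the summation folded into the marking pass:

def sum_primes_below(n: int) -> int:
    is_composite = bytearray(n)  # 0 = possibly prime
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n, i):
                is_composite[j] = 1
    return total

assert sum_primes_below(10) == 17  # 2 + 3 + 5 + 7
# sum_primes_below(2_000_000) == 142913828922 (Project Euler problem 10)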
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Dict = StableDiffusionLDMaDPipeline
UpperCAmelCase__ : str = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: Union[str, Any] ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = rgb[0, -3:, -3:, -1]
__lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__lowerCamelCase = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
__lowerCamelCase = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = 3 * [inputs["""prompt"""]]
# forward
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
__lowerCamelCase = depth_slice_a[0, -3:, -1]
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = 3 * [inputs.pop("""prompt""" )]
__lowerCamelCase = ldmad_pipe.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs["""input_ids"""].to(UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe.text_encoder(UpperCamelCase_ )[0]
__lowerCamelCase = prompt_embeds
# forward
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
__lowerCamelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
__lowerCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = """french fries"""
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = rgb[0, -3:, -3:, -1]
__lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__lowerCamelCase = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
__lowerCamelCase = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any]="cpu" , UpperCamelCase_: int=torch.floataa , UpperCamelCase_: Union[str, Any]=0 ):
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
__lowerCamelCase = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_inputs(UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = rgb[0, -3:, -3:, -1].flatten()
__lowerCamelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
__lowerCamelCase = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
__lowerCamelCase = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: int="cpu" , UpperCamelCase_: List[Any]=torch.floataa , UpperCamelCase_: Optional[int]=0 ):
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_inputs(UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = 0.49_5586
__lowerCamelCase = 0.3379_5515
__lowerCamelCase = 112.4_8518
__lowerCamelCase = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_inputs(UpperCamelCase_ )
__lowerCamelCase = ldmad_pipe(**UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = output.rgb, output.depth
__lowerCamelCase = 0.419_4127
__lowerCamelCase = 0.3537_5586
__lowerCamelCase = 0.563_8502
__lowerCamelCase = 0.3468_6103
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 80 |
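Inference-side usage matching the slow tests above: the LDM3D pipeline returns an RGB image and a depth map in a single pass, and `Intel/ldm3d` is the checkpoint name the tests load.

import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
rgb, depth = output.rgb, output.depth  # both modalities come back together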
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
| 80 | 1 |
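The history blend inside `step` above is a classic linear multistep (Adams-Bashforth style) combination over the last few model outputs; isolated, it reads:

def multistep_blend(ets):
    # `ets` is the running history of model outputs, most recent last
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

# the coefficients sum to the step weight, so a constant history is a fixed point
assert multistep_blend([1.0, 1.0, 1.0, 1.0]) == 1.0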
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCamelCase__ ( A__ : Any="ro" , A__ : Dict="en" , A__ : Dict="wmt16" , A__ : List[Any]=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__lowerCamelCase = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
__lowerCamelCase = datasets.load_dataset(A__ , A__ )
if save_dir is None:
__lowerCamelCase = f'{dataset}-{pair}'
__lowerCamelCase = Path(A__ )
save_dir.mkdir(exist_ok=A__ )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
__lowerCamelCase = """val""" if split == """validation""" else split
__lowerCamelCase = save_dir.joinpath(f'{fn}.source' )
__lowerCamelCase = save_dir.joinpath(f'{fn}.target' )
__lowerCamelCase = src_path.open("""w+""" )
__lowerCamelCase = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowerCamelCase = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 80 |
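For orientation, `fire.Fire` exposes the function's arguments on the command line, so a typical invocation is `python <script>.py ro en wmt16 <save_dir>` (script name hypothetical). The same dataset can also be inspected directly:

from datasets import load_dataset

ds = load_dataset("wmt16", "ro-en", split="train[:3]")
for ex in ds:
    # each record is {"translation": {"ro": ..., "en": ...}}
    print(ex["translation"]["ro"], "->", ex["translation"]["en"])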
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 | 1 |
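The heading logic above in isolation: depth 0 opens a new `##` section, while deeper path components become indented bullets. This sketch assumes a two-space indent per level, which the snippet above does not show unambiguously.

def md_prefix(depth: int) -> str:
    # depth 0 -> new section heading; depth k -> bullet indented k levels
    return f"{depth * '  '}*" if depth else "\n##"

print(repr(md_prefix(0)))  # '\n##'
print(repr(md_prefix(2)))  # '    *'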
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Dict , A__ : Union[str, Any] , A__ : Any , A__ : Dict ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
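# Note on the dedup above: `accelerator.gather` pads the final batch so every
# process contributes the same number of samples; trimming to
# len(eval_dataloader.dataset) on the last step drops those padded duplicates.
# Newer accelerate releases expose `accelerator.gather_for_metrics`, which
# performs this trimming automatically.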
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 80 |
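The script above saves a full training checkpoint with `accelerator.save_state` once per epoch and restores it with `accelerator.load_state` when `--resume_from_checkpoint` is passed. A minimal sketch of that round trip, assuming `accelerate` and `torch` are installed; the tiny model, optimizer, and directory name are illustrative placeholders, not the script's own objects.

```python
# Minimal sketch of the save_state / load_state round trip used above.
# Assumes `accelerate` and `torch` are installed; all names are placeholders.
import os
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model, optimizer = accelerator.prepare(model, optimizer)

checkpoint_dir = os.path.join(".", "epoch_0")
accelerator.save_state(checkpoint_dir)  # writes model, optimizer, and RNG state

# ... later, after recreating and preparing the same objects ...
accelerator.load_state(checkpoint_dir)  # restores everything in place
```

The script's `state_{epoch}.json` files complement this: they record scalars such as accuracy and the current learning rates so a resumed run can be asserted against them.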
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
from collections.abc import Sequence
from queue import Queue
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=None ):
__lowerCamelCase = start
__lowerCamelCase = end
__lowerCamelCase = val
__lowerCamelCase = (start + end) // 2
__lowerCamelCase = left
__lowerCamelCase = right
def __repr__( self: Tuple ):
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowerCamelCase__:
def __init__( self: int , UpperCamelCase_: Sequence , UpperCamelCase_: str ):
__lowerCamelCase = collection
__lowerCamelCase = function
if self.collection:
__lowerCamelCase = self._build_tree(0 , len(UpperCamelCase_ ) - 1 )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Dict ):
self._update_tree(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ):
return self._query_range(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict ):
if start == end:
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.collection[start] )
__lowerCamelCase = (start + end) // 2
__lowerCamelCase = self._build_tree(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self._build_tree(mid + 1 , UpperCamelCase_ )
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.fn(left.val , right.val ) , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
if node.start == i and node.end == i:
__lowerCamelCase = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
self._update_tree(node.right , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.fn(node.left.val , node.right.val )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Dict ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
if self.root is not None:
__lowerCamelCase = Queue()
queue.put(self.root )
while not queue.empty():
__lowerCamelCase = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
UpperCAmelCase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 80 |
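The class above is a pointer-based segment tree with point update and inclusive range query. The same structure fits in a flat array of size 2n, which is often easier to follow; a sketch, assuming the combining function is associative and commutative (the boundary folds below interleave left and right pickups into one accumulator).

```python
# Compact array-based segment tree: leaves at indices n..2n-1,
# internal node i combines its children 2i and 2i+1.
class ArraySegmentTree:
    def __init__(self, data, fn):
        self.n = len(data)
        self.fn = fn
        self.tree = [None] * self.n + list(data)
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = fn(self.tree[2 * i], self.tree[2 * i + 1])

    def update(self, i, value):
        i += self.n
        self.tree[i] = value
        while i > 1:
            i //= 2
            self.tree[i] = self.fn(self.tree[2 * i], self.tree[2 * i + 1])

    def query(self, left, right):  # inclusive bounds, like query_range above
        result = None
        left += self.n
        right += self.n + 1
        while left < right:
            if left % 2:
                result = self.tree[left] if result is None else self.fn(result, self.tree[left])
                left += 1
            if right % 2:
                right -= 1
                result = self.tree[right] if result is None else self.fn(result, self.tree[right])
            left //= 2
            right //= 2
        return result

arr = ArraySegmentTree([2, 1, 5, 3, 4], lambda a, b: a + b)
arr.update(1, 5)
assert (arr.query(3, 4), arr.query(2, 2), arr.query(1, 3)) == (7, 5, 13)
```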
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
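The block above is the standard lazy-import boilerplate: nothing under `modeling_swiftformer` is imported until a symbol is actually requested, which keeps the top-level import cheap. Plain Python can express the same idea with a module-level `__getattr__` (PEP 562); a sketch with placeholder names, not the actual `_LazyModule` implementation.

```python
# mypkg/__init__.py: a minimal sketch of lazy imports via PEP 562.
# The submodule and class names are illustrative placeholders.
import importlib

_LAZY = {
    "SwiftFormerConfig": ".configuration_swiftformer",
    "SwiftFormerModel": ".modeling_swiftformer",
}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(list(globals()) + list(_LAZY))
```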
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 80 | 1 |
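Note that the obfuscation collapsed the two index parameters of the compare-and-swap above into a single name, so `array[indexa] > array[indexa]` compares an element with itself. A corrected, runnable sketch of the same algorithm; as with any bitonic sort, the input length must be a power of two.

```python
# Corrected sketch of bitonic sort with two distinct swap indices.
def comp_and_swap(array, i, j, direction):
    # direction == 1 sorts ascending, direction == 0 sorts descending
    if (direction == 1 and array[i] > array[j]) or (direction == 0 and array[i] < array[j]):
        array[i], array[j] = array[j], array[i]

def bitonic_merge(array, low, length, direction):
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)

def bitonic_sort(array, low, length, direction):
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)           # ascending half
        bitonic_sort(array, low + middle, middle, 0)  # descending half
        bitonic_merge(array, low, length, direction)  # merge the bitonic sequence

data = [12, 42, -21, 17, 23, 18, 9, -5]  # length must be a power of two
bitonic_sort(data, 0, len(data), 1)
assert data == sorted(data)
```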
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
UpperCAmelCase__ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
UpperCAmelCase__ : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase__ : bool = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.task_name.lower()
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = 'train'
UpperCAmelCase__ : int = 'dev'
UpperCAmelCase__ : List[str] = 'test'
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : GlueDataTrainingArguments
UpperCAmelCase__ : str
UpperCAmelCase__ : List[InputFeatures]
def __init__( self: Any , UpperCamelCase_: GlueDataTrainingArguments , UpperCamelCase_: PreTrainedTokenizerBase , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Union[str, Split] = Split.train , UpperCamelCase_: Optional[str] = None , ):
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , UpperCamelCase_ , )
__lowerCamelCase = args
__lowerCamelCase = glue_processors[args.task_name]()
__lowerCamelCase = glue_output_modes[args.task_name]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
try:
__lowerCamelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowerCamelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowerCamelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowerCamelCase, __lowerCamelCase = label_list[2], label_list[1]
__lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + """.lock"""
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache:
__lowerCamelCase = time.time()
__lowerCamelCase = torch.load(UpperCamelCase_ )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowerCamelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowerCamelCase = self.processor.get_test_examples(args.data_dir )
else:
__lowerCamelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowerCamelCase = examples[:limit_length]
__lowerCamelCase = glue_convert_examples_to_features(
UpperCamelCase_ , UpperCamelCase_ , max_length=args.max_seq_length , label_list=UpperCamelCase_ , output_mode=self.output_mode , )
__lowerCamelCase = time.time()
torch.save(self.features , UpperCamelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self: str ):
return len(self.features )
def __getitem__( self: Optional[int] , UpperCamelCase_: Optional[int] ):
return self.features[i]
def lowerCAmelCase__ ( self: Any ):
return self.label_list
| 80 |
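The dataset class above wraps its feature cache in a `FileLock` so that, under distributed training, only the first process builds the features and every other process loads them from disk. A minimal sketch of that cache-or-compute pattern, assuming the `filelock` package is installed; the path and payload are placeholders.

```python
# Cache-or-compute guarded by a file lock: the first process to acquire
# the lock builds and saves the result, later processes just load it.
import os
import pickle
from filelock import FileLock

def cached_compute(cache_path, compute_fn, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, "rb") as f:
                return pickle.load(f)
        result = compute_fn()
        with open(cache_path, "wb") as f:
            pickle.dump(result, f)
        return result

features = cached_compute("/tmp/features.pkl", lambda: [i * i for i in range(10)])
```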
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCAmelCase_ = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCAmelCase_ = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCAmelCase_ = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 1 |
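The conversation object above is a small state machine: `add_user_input` stages one pending turn, `mark_processed` moves it into the history, `append_response` records the model's reply, and `iter_texts` replays the transcript in order. A standalone sketch of just that turn-taking logic (the pipeline plumbing is omitted, and this version raises where the original merely warns).

```python
# Standalone sketch of the turn-taking state machine used by the pipeline.
class MiniConversation:
    def __init__(self):
        self.past_user_inputs = []
        self.generated_responses = []
        self.new_user_input = None

    def add_user_input(self, text):
        if self.new_user_input is not None:
            raise ValueError("previous input not processed yet")  # original warns instead
        self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input is not None:
            self.past_user_inputs.append(self.new_user_input)
            self.new_user_input = None

    def append_response(self, text):
        self.generated_responses.append(text)

    def iter_texts(self):
        for user, bot in zip(self.past_user_inputs, self.generated_responses):
            yield True, user
            yield False, bot
        if self.new_user_input is not None:
            yield True, self.new_user_input

conv = MiniConversation()
conv.add_user_input("Hi there")
conv.mark_processed()
conv.append_response("Hello! How can I help?")
for is_user, text in conv.iter_texts():
    print("user >>" if is_user else "bot  >>", text)
```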
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
| 80 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
| 80 | 1 |
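A readable sketch of the segmented sieve above: sieve the base primes up to about the square root first, then mark their multiples one window at a time, so peak memory stays around O(sqrt(n)) instead of O(n). The assertion cross-checks against trial division on a small input.

```python
# Segmented sieve of Eratosthenes, deobfuscated. Assumes n >= 2.
import math

def segmented_sieve(n):
    limit = math.isqrt(n) + 1
    is_prime = [True] * (limit + 1)
    base_primes = []
    for p in range(2, limit + 1):
        if is_prime[p]:
            base_primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    primes = list(base_primes)
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            # first multiple of p inside [low, high]
            start = max(p * p, (low + p - 1) // p * p)
            for multiple in range(start, high + 1, p):
                segment[multiple - low] = False
        primes.extend(low + i for i, flag in enumerate(segment) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

assert segmented_sieve(100) == [p for p in range(2, 101)
                                if all(p % d for d in range(2, p))]
```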
def lowerCamelCase__ ( A__ : int = 1000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 80 |
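The one-liner above looks like Project Euler problem 120 (square remainders): for odd n one has (a - 1)^n + (a + 1)^n ≡ 2na (mod a^2), and maximising over n gives r_max(a) = 2a * floor((a - 1) / 2). A brute-force cross-check of that identity on small a; the bound on n is a heuristic that is comfortably large enough for this range.

```python
# Cross-check of the closed form: max over n of
# ((a - 1)**n + (a + 1)**n) % a**2 equals 2 * a * ((a - 1) // 2).
def r_max_brute(a, n_limit=200):
    return max(((a - 1) ** n + (a + 1) ** n) % (a * a) for n in range(1, n_limit))

for a in range(3, 50):
    assert r_max_brute(a) == 2 * a * ((a - 1) // 2)
```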
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 1 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 | 1 |
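A deobfuscated sketch of the back-edge test above, with two worked graphs: reaching a node that is still on the recursion stack means the DFS found a cycle.

```python
# Directed-graph cycle detection via DFS and a recursion-stack set.
def has_cycle(graph):
    visited, on_stack = set(), set()

    def dfs(node):
        visited.add(node)
        on_stack.add(node)
        for neighbour in graph[node]:
            if neighbour not in visited:
                if dfs(neighbour):
                    return True
            elif neighbour in on_stack:  # back edge: cycle found
                return True
        on_stack.remove(node)  # done with this branch
        return False

    return any(node not in visited and dfs(node) for node in graph)

assert has_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert has_cycle({0: [1], 1: [2], 2: []}) is False   # a simple chain
```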
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ):
'''simple docstring'''
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 80 | 1 |
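A quick property check of the median above against the standard library, with readable names standing in for the obfuscated ones:

```python
# statistics.median computes the same midpoint rule, so the two must agree.
import random
import statistics

def median_of_two_arrays(nums_a, nums_b):
    merged = sorted(nums_a + nums_b)
    half, odd = divmod(len(merged), 2)
    return merged[half] if odd else (merged[half - 1] + merged[half]) / 2

random.seed(0)
for _ in range(100):
    a = [random.uniform(-10, 10) for _ in range(random.randint(1, 8))]
    b = [random.uniform(-10, 10) for _ in range(random.randint(0, 8))]
    assert median_of_two_arrays(a, b) == statistics.median(a + b)
```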
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * "  "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 |
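For reference, `md_prefix` above maps depth 0 to a fresh `##` heading and depth `i` to a bullet indented two spaces per level (the upstream project's convention). A worked check under that assumption:

```python
# Worked example of the heading/bullet prefix rule used by the script.
def md_prefix(indent):
    return f"{indent * '  '}*" if indent else "\n##"

assert md_prefix(0) == "\n##"
assert md_prefix(1) == "  *"
assert md_prefix(2) == "    *"
```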
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
| 80 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
| 80 | 1 |
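The mapping above replaces pad-token ids in the labels with -100, which is the default `ignore_index` of PyTorch's cross-entropy loss, so padded positions contribute nothing to training. A minimal sketch of that masking step:

```python
# Pad positions become -100 so CrossEntropyLoss (ignore_index=-100) skips them.
import torch

pad_token_id = 0
labels = torch.tensor([[5, 9, 2, 0, 0],
                       [7, 2, 0, 0, 0]])
labels = labels.masked_fill(labels == pad_token_id, -100)
assert labels[0].tolist() == [5, 9, 2, -100, -100]
```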
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding the nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
| 80 | 1 |
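A readable sketch of the island counter above: depth-first search over the 8-neighbour grid, incrementing the count each time an unvisited land cell opens a new component. The example grid and its expected count are illustrative.

```python
# Count connected components of 1-cells, with diagonal adjacency.
def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(r, c):
        visited[r][c] = True
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] and not visited[nr][nc]:
                    dfs(nr, nc)

    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and not visited[r][c]:
                dfs(r, c)  # flood one whole island
                count += 1
    return count

grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
assert count_islands(grid) == 5
```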
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = FlaxAutoencoderKL
@property
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = 4
__lowerCamelCase = 3
__lowerCamelCase = (32, 32)
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = jax.random.uniform(UpperCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
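    # Shape sketch (hypothetical hidden_size = 4): the fused qkv weight is (12, 4);
    # rows [0:4] become the query projection, [4:8] the key, and the last 4 the value,
    # mirroring the three slices above.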
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCAmelCase_ = None
UpperCAmelCase_ = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
UpperCAmelCase_ = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
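# Reading these dtype strings: the first character is the byte order ('<' little-endian,
# '>' big-endian, '|' not applicable), followed by the kind ('b' boolean, 'u' unsigned
# integer, 'i' signed integer, 'f' float) and the item size in bytes, e.g. '<u2' is a
# little-endian uint16.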
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[str] = None
# Automatically constructed
UpperCAmelCase__ : ClassVar[str] = "PIL.Image.Image"
UpperCAmelCase__ : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
UpperCAmelCase__ : str = field(default='Image' , init=__lowerCamelCase , repr=__lowerCamelCase)
def __call__( self: Any ):
return self.pa_type
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = np.array(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(UpperCamelCase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCamelCase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: dict , UpperCamelCase_: Tuple=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__lowerCamelCase = {}
__lowerCamelCase, __lowerCamelCase = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(UpperCamelCase_ ):
__lowerCamelCase = PIL.Image.open(UpperCamelCase_ )
else:
__lowerCamelCase = path.split("""::""" )[-1]
try:
__lowerCamelCase = string_to_dict(UpperCamelCase_ , config.HUB_DATASETS_URL )["""repo_id"""]
__lowerCamelCase = token_per_repo_id.get(UpperCamelCase_ )
except ValueError:
__lowerCamelCase = None
with xopen(UpperCamelCase_ , """rb""" , use_auth_token=UpperCamelCase_ ) as f:
__lowerCamelCase = BytesIO(f.read() )
__lowerCamelCase = PIL.Image.open(bytes_ )
else:
__lowerCamelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCAmelCase__ ( self: List[str] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__lowerCamelCase = pa.array([None] * len(UpperCamelCase_ ) , type=pa.binary() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowerCamelCase = pa.array([None] * len(UpperCamelCase_ ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__lowerCamelCase = storage.field("""bytes""" )
else:
__lowerCamelCase = pa.array([None] * len(UpperCamelCase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__lowerCamelCase = storage.field("""path""" )
else:
__lowerCamelCase = pa.array([None] * len(UpperCamelCase_ ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__lowerCamelCase = pa.array(
[encode_np_array(np.array(UpperCamelCase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__lowerCamelCase = pa.array([None] * len(UpperCamelCase_ ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCamelCase_ , self.pa_type )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(UpperCamelCase_: Optional[int] ):
with xopen(UpperCamelCase_ , """rb""" ) as f:
__lowerCamelCase = f.read()
return bytes_
__lowerCamelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowerCamelCase = pa.array(
[os.path.basename(UpperCamelCase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCamelCase_ , self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__lowerCamelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( A__ : "PIL.Image.Image" ):
'''simple docstring'''
__lowerCamelCase = BytesIO()
if image.format in list_image_compression_formats():
__lowerCamelCase = image.format
else:
__lowerCamelCase = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(A__ , format=A__ )
return buffer.getvalue()
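# The fallback above keeps encoding lossless: PNG covers the common 1/L/LA/RGB/RGBA
# modes, while TIFF is used for the remaining modes (e.g. higher bit depths) that
# this PNG path does not handle.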
def lowerCamelCase__ ( A__ : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(A__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(A__ )}
def lowerCamelCase__ ( A__ : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__lowerCamelCase = array.dtype
__lowerCamelCase = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__lowerCamelCase = dtype.kind
__lowerCamelCase = dtype.itemsize
__lowerCamelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__lowerCamelCase = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__lowerCamelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__lowerCamelCase = dtype_byteorder + dtype_kind + str(A__ )
__lowerCamelCase = np.dtype(A__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
__lowerCamelCase = PIL.Image.fromarray(array.astype(A__ ) )
return {"path": None, "bytes": image_to_bytes(A__ )}
def lowerCamelCase__ ( A__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__lowerCamelCase, __lowerCamelCase = first_non_null_value(A__ )
if isinstance(A__ , A__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(A__ , np.ndarray ):
__lowerCamelCase = no_op_if_value_is_null(A__ )
return [obj_to_image_dict_func(A__ ) for obj in objs]
elif isinstance(A__ , PIL.Image.Image ):
__lowerCamelCase = no_op_if_value_is_null(A__ )
return [obj_to_image_dict_func(A__ ) for obj in objs]
else:
return objs
else:
return objs
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
from __future__ import annotations
UpperCAmelCase_ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase__ ( A__ : Matrix , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
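# Example: with the initial grid above, placing 5 at row 0, column 1 is rejected
# because row 0 already holds a 5 at column 3.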
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
if location := find_empty_location(A__ ):
__lowerCamelCase, __lowerCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A__ , A__ , A__ , A__ ):
__lowerCamelCase = digit
if sudoku(A__ ) is not None:
return grid
__lowerCamelCase = 0
return None
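# Backtracking summary: try each digit 1-9 in the first empty cell, recurse, and
# reset the cell to 0 when the recursive call fails; returning None propagates the
# failure so the caller moves on to its next digit.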
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(A__ , end=""" """ )
print()
if __name__ == "__main__":
    # solve each example grid in place, printing it before and after
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
UpperCAmelCase_ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 80 | 1 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
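# Example (hypothetical): for graph = {0: [1], 1: [2], 2: [0]} the search reaches
# vertex 0 again while it is still on the recursion stack, so a cycle is reported.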
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
    # the largest integer less than or equal to b_estimate
    __lowerCamelCase = 42
    # the smallest integer greater than or equal to b_estimate
    __lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
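        # Derivation of the estimate above: we want triangle_b * triangle_a ~ target
        # with triangle_b = b * (b + 1) / 2, so b**2 + b - 2 * target / triangle_a = 0,
        # whose positive root is b = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2.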
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
from math import sqrt
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
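# Example: for number = 25 the loop tests i = 5 and finds 25 % 5 == 0, so it
# returns False; for number = 29 no candidate up to sqrt(29) divides it.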
def lowerCamelCase__ ( A__ : int = 10001 ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(A__ ):
count += 1
while count != nth:
number += 2
if is_prime(A__ ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
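    # Each loop iteration applies a ResNet block followed by a cross-attention
    # transformer block, and every intermediate hidden state is collected so the
    # up blocks can consume them as skip connections.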
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: Distribution , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = 1.0 if scale is None else scale
__lowerCamelCase = 0.0 if loc is None else loc
super().__init__(UpperCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCamelCase_ )] )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCAmelCase__ ( self: List[str] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return self.variance.sqrt()
class lowerCamelCase__( nn.Module):
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Callable[..., Tuple[torch.Tensor]] , **UpperCamelCase_: Optional[Any] ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = args_dim
__lowerCamelCase = nn.ModuleList([nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) for dim in args_dim.values()] )
__lowerCamelCase = domain_map
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = [proj(UpperCamelCase_ ) for proj in self.proj]
return self.domain_map(*UpperCamelCase_ )
class lowerCamelCase__( nn.Module):
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] ):
super().__init__()
__lowerCamelCase = function
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: List[Any] ):
return self.function(UpperCamelCase_ , *UpperCamelCase_ )
class lowerCamelCase__:
UpperCAmelCase__ : type
UpperCAmelCase__ : int
UpperCAmelCase__ : Dict[str, int]
def __init__( self: Optional[Any] , UpperCamelCase_: int = 1 ):
__lowerCamelCase = dim
__lowerCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Tuple ):
if self.dim == 1:
return self.distribution_class(*UpperCamelCase_ )
else:
return Independent(self.distribution_class(*UpperCamelCase_ ) , 1 )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None , ):
__lowerCamelCase = self._base_distribution(UpperCamelCase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCamelCase_ , loc=UpperCamelCase_ , scale=UpperCamelCase_ , event_dim=self.event_dim )
@property
def lowerCAmelCase__ ( self: Tuple ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCAmelCase__ ( self: str ):
return len(self.event_shape )
@property
def lowerCAmelCase__ ( self: Dict ):
return 0.0
def lowerCAmelCase__ ( self: int , UpperCamelCase_: int ):
return ParameterProjection(
in_features=UpperCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCAmelCase__ ( self: Optional[int] , *UpperCamelCase_: torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: torch.Tensor ):
return (x + torch.sqrt(torch.square(UpperCamelCase_ ) + 4.0 )) / 2.0
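    # squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps the reals smoothly onto the
    # positive reals (squareplus(0) = 1); the subclasses below use it to keep
    # scale and df parameters positive.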
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
UpperCAmelCase__ : type = StudentT
@classmethod
def lowerCAmelCase__ ( cls: Dict , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
__lowerCamelCase = 2.0 + cls.squareplus(UpperCamelCase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"loc": 1, "scale": 1}
UpperCAmelCase__ : type = Normal
@classmethod
def lowerCAmelCase__ ( cls: int , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"total_count": 1, "logits": 1}
UpperCAmelCase__ : type = NegativeBinomial
@classmethod
def lowerCAmelCase__ ( cls: Any , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase, __lowerCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCamelCase_ , logits=UpperCamelCase_ )
else:
return Independent(self.distribution_class(total_count=UpperCamelCase_ , logits=UpperCamelCase_ ) , 1 )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None ):
__lowerCamelCase, __lowerCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
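        # Scaling note: a negative binomial with logits l has mean total_count * exp(l),
        # so adding log(scale) to the logits multiplies the mean by scale while leaving
        # total_count untouched.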
| 80 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
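# Weight-tying sketch: the linear layer above reuses the embedding matrix as its
# weight, so the LM head projects hidden states to vocabulary logits without
# introducing new parameters.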
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 1 |
def lowerCamelCase__ ( A__ : bytes ):
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(A__ ) , 2 ) )
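# Example: "7F01" decodes chunk by chunk as int("7F", 16) = 127 and
# int("01", 16) = 1, giving bytes([127, 1]) -- the inverse of the encoder above.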
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : str , A__ : List[str] , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : List[str] , A__ : Any , A__ : int , A__ : Optional[Any] , A__ : Tuple=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[Any] , A__ : Optional[Any] , A__ : Dict , A__ : Dict , A__ : List[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : Any , A__ : Union[str, Any] , A__ : List[str] , A__ : Tuple=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
# the below check should work, but currently it isn't working (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
| 80 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
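A readable, runnable rendering of the sieve-and-sum idea above (Project Euler 10), assuming nothing beyond the standard library and easy to sanity-check on small inputs:

def sum_of_primes_below(n: int = 2_000_000) -> int:
    # is_composite[i] flips to True once some smaller prime divides i
    is_composite = [False] * n
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n, i):
                is_composite[j] = True
    return total

# Primes below 10 are 2, 3, 5, 7, so the sum is 17.
assert sum_of_primes_below(10) == 17
print(sum_of_primes_below())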
UpperCAmelCase_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
UpperCAmelCase_ = frozenset(['prompt', 'negative_prompt'])
UpperCAmelCase_ = frozenset([])
UpperCAmelCase_ = frozenset(['image'])
UpperCAmelCase_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
UpperCAmelCase_ = frozenset(['image'])
UpperCAmelCase_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
UpperCAmelCase_ = frozenset(['prompt', 'image', 'negative_prompt'])
UpperCAmelCase_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
UpperCAmelCase_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
UpperCAmelCase_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
UpperCAmelCase_ = frozenset(['image', 'mask_image'])
UpperCAmelCase_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
UpperCAmelCase_ = frozenset(['example_image', 'image', 'mask_image'])
UpperCAmelCase_ = frozenset(['class_labels'])
UpperCAmelCase_ = frozenset(['class_labels'])
UpperCAmelCase_ = frozenset(['batch_size'])
UpperCAmelCase_ = frozenset([])
UpperCAmelCase_ = frozenset(['batch_size'])
UpperCAmelCase_ = frozenset([])
UpperCAmelCase_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
UpperCAmelCase_ = frozenset(['prompt', 'negative_prompt'])
UpperCAmelCase_ = frozenset(['input_tokens'])
UpperCAmelCase_ = frozenset(['input_tokens'])
| 80 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
| 80 | 1 |
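The ramp of update rules in the scheduler's step method above is the classic family of explicit Adams-Bashforth multistep coefficients, raising the order from 1 to 4 as past derivative estimates accumulate. A self-contained sketch of the same warm-up on a toy ODE; this illustrates the coefficients only, not the diffusion scheduler itself:

import math

def multistep_update(ets: list[float], h: float, y: float) -> float:
    # Order ramps 1 -> 4 exactly as in the scheduler's step() above.
    if len(ets) == 1:
        d = ets[-1]
    elif len(ets) == 2:
        d = (3 * ets[-1] - ets[-2]) / 2
    elif len(ets) == 3:
        d = (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    else:
        d = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
    return y + h * d

# Integrate y' = -y from y(0) = 1 over [0, 1]; the exact answer is exp(-1).
y, h, ets = 1.0, 0.01, []
for _ in range(100):
    ets = (ets + [-y])[-4:]  # keep only the last four derivative estimates
    y = multistep_update(ets, h, y)
print(y, math.exp(-1))  # both close to 0.3679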
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'facebook/nllb-200-distilled-600M'
UpperCAmelCase__ : Union[str, Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
UpperCAmelCase__ : Dict = 'translator'
UpperCAmelCase__ : str = AutoTokenizer
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM
UpperCAmelCase__ : str = LANGUAGE_CODES
UpperCAmelCase__ : Optional[Any] = ['text', 'text', 'text']
UpperCAmelCase__ : Dict = ['text']
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] ):
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__lowerCamelCase = self.lang_to_code[src_lang]
__lowerCamelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase_ , return_tensors="""pt""" , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[Any] ):
return self.model.generate(**UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase_ )
| 80 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 | 1 |
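For intuition, the prefix helper above maps depth 0 to a fresh "##" section heading and depth k to an indented "*" bullet. A tiny standalone check, reimplemented here (the indent width per level is a stylistic choice; the snippet above uses one space):

def md_prefix(depth: int) -> str:
    return f"{depth * '  '}*" if depth else "\n##"

print(repr(md_prefix(0)))  # '\n##'  -> new top-level section
print(repr(md_prefix(1)))  # '  *'   -> first-level bullet
print(repr(md_prefix(2)))  # '    *' -> second-level bullet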
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Any , UpperCamelCase_: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
__lowerCamelCase = nn.ModuleList(UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[torch.Tensor, float, int] , UpperCamelCase_: torch.Tensor , UpperCamelCase_: List[torch.tensor] , UpperCamelCase_: List[float] , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[Dict[str, Any]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(UpperCamelCase_ , UpperCamelCase_ , self.nets ) ):
__lowerCamelCase, __lowerCamelCase = controlnet(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
# merge samples
if i == 0:
__lowerCamelCase, __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCamelCase_ , UpperCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, os.PathLike] , UpperCamelCase_: bool = True , UpperCamelCase_: Callable = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , ):
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCamelCase_ , is_main_process=UpperCamelCase_ , save_function=UpperCamelCase_ , safe_serialization=UpperCamelCase_ , variant=UpperCamelCase_ , )
idx += 1
__lowerCamelCase = model_path_to_save + F'_{idx}'
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Optional[Union[str, os.PathLike]] , **UpperCamelCase_: Any ):
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = ControlNetModel.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
controlnets.append(UpperCamelCase_ )
idx += 1
__lowerCamelCase = pretrained_model_path + F'_{idx}'
logger.info(F'{len(UpperCamelCase_ )} controlnets loaded from {pretrained_model_path}.' )
if len(UpperCamelCase_ ) == 0:
raise ValueError(
F'No ControlNets found under {os.path.dirname(UpperCamelCase_ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(UpperCamelCase_ )
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
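The list-average helper just above calls `doctest.testmod()` even though its docstring examples were stripped along with the docstring. A restored, self-testing version looks like this (the example values are representative, not recovered from the original):

def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)

if __name__ == "__main__":
    import doctest
    doctest.testmod()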
from itertools import product
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = sides_number
__lowerCamelCase = max_face_number * dice_number
__lowerCamelCase = [0] * (max_total + 1)
__lowerCamelCase = 1
__lowerCamelCase = range(A__ , max_face_number + 1 )
for dice_numbers in product(A__ , repeat=A__ ):
__lowerCamelCase = sum(A__ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__lowerCamelCase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__lowerCamelCase = 0
__lowerCamelCase = 9
__lowerCamelCase = 4 * 9
__lowerCamelCase = 6
for peter_total in range(A__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__lowerCamelCase = (4**9) * (6**6)
__lowerCamelCase = peter_wins_count / total_games_number
__lowerCamelCase = round(A__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 1 |
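The Project Euler 205 snippet above enumerates every dice combination with `itertools.product`, which walks 4^9 plus 6^6 tuples. The same total-frequency table can be built by convolving one die at a time, which scales much better; a sketch of that cheaper route:

def dice_total_counts(sides: int, dice: int) -> list[int]:
    # counts[t] = number of ways the dice sum to t
    counts = [1]  # zero dice: one way to total 0
    for _ in range(dice):
        new_counts = [0] * (len(counts) + sides)
        for total, ways in enumerate(counts):
            for face in range(1, sides + 1):
                new_counts[total + face] += ways
        counts = new_counts
    return counts

peter = dice_total_counts(4, 9)  # nine four-sided dice
colin = dice_total_counts(6, 6)  # six six-sided dice
# Peter wins when his total strictly exceeds Colin's.
wins = sum(ways * sum(colin[:total]) for total, ways in enumerate(peter))
print(round(wins / (4**9 * 6**6), 7))  # 0.5731441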
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = 'gpt_neox'
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict=5_04_32 , UpperCamelCase_: List[Any]=61_44 , UpperCamelCase_: Any=44 , UpperCamelCase_: Union[str, Any]=64 , UpperCamelCase_: Union[str, Any]=2_45_76 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Optional[int]=0.25 , UpperCamelCase_: Tuple=1_00_00 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=20_48 , UpperCamelCase_: int=0.02 , UpperCamelCase_: int=1E-5 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=0 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=False , UpperCamelCase_: Dict=True , UpperCamelCase_: Any=None , **UpperCamelCase_: Tuple , ):
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = rotary_pct
__lowerCamelCase = rotary_emb_base
__lowerCamelCase = attention_dropout
__lowerCamelCase = hidden_dropout
__lowerCamelCase = classifier_dropout
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = use_cache
__lowerCamelCase = tie_word_embeddings
__lowerCamelCase = use_parallel_residual
__lowerCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def lowerCAmelCase__ ( self: Any ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCamelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F'got {self.rope_scaling}' )
__lowerCamelCase = self.rope_scaling.get("""type""" , UpperCamelCase_ )
__lowerCamelCase = self.rope_scaling.get("""factor""" , UpperCamelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 80 | 1 |
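Bitonic sort as written above assumes the input length is a power of two, since every recursion halves the range exactly. A compact demonstration with readable names, same structure as the snippet:

def bitonic_merge(arr: list[int], low: int, length: int, ascending: bool) -> None:
    if length <= 1:
        return
    mid = length // 2
    for i in range(low, low + mid):
        if (arr[i] > arr[i + mid]) == ascending:
            arr[i], arr[i + mid] = arr[i + mid], arr[i]
    bitonic_merge(arr, low, mid, ascending)
    bitonic_merge(arr, low + mid, mid, ascending)

def bitonic_sort(arr: list[int], low: int, length: int, ascending: bool) -> None:
    if length <= 1:
        return
    mid = length // 2
    bitonic_sort(arr, low, mid, True)         # first half ascending
    bitonic_sort(arr, low + mid, mid, False)  # second half descending -> bitonic
    bitonic_merge(arr, low, length, ascending)

data = [12, 42, -21, 17, 23, 18, 9, -5]  # length 8, a power of two
bitonic_sort(data, 0, len(data), True)
print(data)  # [-21, -5, 9, 12, 17, 18, 23, 42]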
import sys
from collections import defaultdict
class lowerCamelCase__:
def __init__( self: Optional[int] ):
__lowerCamelCase = []
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ):
return self.node_position[vertex]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int ):
__lowerCamelCase = pos
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase, __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase, __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase, __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCamelCase_ )
self.top_to_bottom(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , UpperCamelCase_ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(UpperCamelCase_ , UpperCamelCase_ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(UpperCamelCase_ , 0 )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple ):
__lowerCamelCase = len(UpperCamelCase_ ) // 2 - 1
for i in range(UpperCamelCase_ , -1 , -1 ):
self.top_to_bottom(UpperCamelCase_ , UpperCamelCase_ , len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple ):
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(UpperCamelCase_ , 0 , len(UpperCamelCase_ ) , UpperCamelCase_ )
return temp
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(A__ )
__lowerCamelCase = [-1] * len(A__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(A__ ) ):
distance_tv.append(sys.maxsize )
positions.append(A__ )
heap.node_position.append(A__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(A__ , A__ )
for _ in range(1 , len(A__ ) ):
__lowerCamelCase = heap.delete_minimum(A__ , A__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(A__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
A__ , heap.get_position(A__ ) , A__ , A__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase_ = int(input('Enter number of edges: ').strip())
UpperCAmelCase_ = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 80 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 1 |
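The hand-rolled heap in the Prim's-algorithm snippet above exists to support decrease-key on vertex distances. A shorter variant with the standard library's heapq tolerates stale heap entries instead of decreasing keys; a sketch on the same adjacency-list shape, not a rewrite of the code above:

import heapq
from collections import defaultdict

def prim_mst(adjacency: dict[int, list[list[int]]], start: int = 0) -> list[tuple[int, int]]:
    visited = {start}
    frontier = [(weight, start, nbr) for nbr, weight in adjacency[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry; a cheaper edge already claimed v
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, w in adjacency[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (w, v, nbr))
    return tree_edges

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prim_mst(graph))  # [(0, 1), (1, 2), (2, 3)]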
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return "".join(chr(ord(A__ ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 1 |
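A minimal end-to-end sketch of the conversational pipeline above; the checkpoint name is illustrative, and this assumes a transformers version that still ships `Conversation`:

from transformers import Conversation, pipeline

# "microsoft/DialoGPT-small" is just an example conversational checkpoint.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("Can you recommend a movie for tonight?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Later turns reuse the same object, so past_user_inputs and generated_responses
# keep accumulating and stay in the model's context.
conversation.add_user_input("Something funny, please.")
conversation = chatbot(conversation)
print(conversation)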
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str]=0 ):
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = """french fries"""
__lowerCamelCase = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = [inputs["""prompt"""]] * 2
__lowerCamelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
__lowerCamelCase = image / 2 + 0.5
__lowerCamelCase = image.permute(0 , 3 , 1 , 2 )
__lowerCamelCase = image.repeat(2 , 1 , 1 , 1 )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__lowerCamelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [round(UpperCamelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(UpperCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="""pt""" ) )[0]
__lowerCamelCase = components["""vae"""]
__lowerCamelCase = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCamelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCamelCase = pipe(**UpperCamelCase_ )[0]
__lowerCamelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str=0 ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__lowerCamelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def callback_fn(UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor ) -> None:
__lowerCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__lowerCamelCase = False
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self: List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCamelCase = inputs["""image"""].resize((5_04, 5_04) )
__lowerCamelCase = """timbrooks/instruct-pix2pix"""
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = pipe(**UpperCamelCase_ )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
__lowerCamelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
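# Hedged usage sketch of the pipeline these tests exercise (public diffusers
# API; prompt, input image and step count here are illustrative only):
# pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
# edited = pipe("make it snowy", image=init_image, num_inference_steps=10).images[0]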
| 80 |
import math
def lowerCamelCase__ ( A__ : int ):
'''Segmented Sieve of Eratosthenes: return a list of all primes up to and including n.'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start]:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j]:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
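# Hedged cross-check sketch: an independent brute-force prime generator to
# compare sieve(n) against for small n (the helper name is illustrative).
def _naive_primes(limit):
    return [p for p in range(2, limit + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]
# e.g. _naive_primes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] == sieve(30)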
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'lilt'
def __init__( self: Dict , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: List[str]=7_68 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Optional[Any]=30_72 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Dict=5_12 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: str=0.02 , UpperCamelCase_: List[Any]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Dict=10_24 , **UpperCamelCase_: Optional[int] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = classifier_dropout
__lowerCamelCase = channel_shrink_ratio
__lowerCamelCase = max_ad_position_embeddings
| 80 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 1 |
def lowerCamelCase__ ( A__ : int ):
'''Kahn's algorithm: print a topological ordering of the DAG, or "Cycle exists" when none exists.'''
__lowerCamelCase = [0] * len(A__ )
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(A__ ) ):
if indegree[i] == 0:
queue.append(A__ )
while queue:
__lowerCamelCase = queue.pop(0 )
cnt += 1
topo.append(A__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(A__ )
if cnt != len(A__ ):
print("""Cycle exists""" )
else:
print(A__ )
# Adjacency List of Graph
UpperCAmelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
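# Hedged reference sketch of the same algorithm (names illustrative): Kahn's
# method returns a topological order, or None when the graph contains a cycle.
from collections import deque

def _kahn_order(adj):
    indegree = {v: 0 for v in adj}
    for neighbours in adj.values():
        for w in neighbours:
            indegree[w] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in adj[v]:
            indegree[w] -= 1
            if indegree[w] == 0:
                queue.append(w)
    return order if len(order) == len(adj) else None
# e.g. _kahn_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) -> [0, 1, 2, 3, 4, 5]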
| 80 |
def lowerCamelCase__ ( A__ : dict ):
'''Return True if the directed graph (an adjacency dict) contains a cycle.'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''DFS helper: return True if a back edge (a node already on the recursion stack) is reachable.'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
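# Hedged usage sketch (check_cycle is the assumed entry-point name): a DAG has
# no back edge, while adding the edge 2 -> 0 closes a cycle.
acyclic_graph = {0: [1], 1: [2], 2: []}
cyclic_graph = {0: [1], 1: [2], 2: [0]}
# check_cycle(acyclic_graph) -> False
# check_cycle(cyclic_graph)  -> True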
| 80 | 1 |
UpperCAmelCase_ = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
UpperCAmelCase_ = {value: key for key, value in encode_dict.items()}
def lowerCamelCase__ ( A__ : str ):
'''Baconian cipher: encode each letter of the word as five A/B symbols.'''
__lowerCamelCase = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def lowerCamelCase__ ( A__ : str ):
'''Baconian cipher: decode a string of A/B symbols (five per letter) back to text.'''
if set(A__ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
__lowerCamelCase = """"""
for word in coded.split():
while len(A__ ) != 0:
decoded += decode_dict[word[:5]]
__lowerCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
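# Hedged round-trip sketch using the table above (the names encode/decode match
# the error messages): each letter maps to a fixed 5-symbol A/B code, so
# decoding consumes the coded word five symbols at a time.
# encode("hello") -> "AABBBAABAAABABAABABAABBAB"
# decode("AABBBAABAAABABAABABAABBAB") -> "hello"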
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ):
'''Return the median of the merged, sorted contents of the two arrays.'''
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 80 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase__ ( ):
'''Build a random test case: ten integers in [-1000, 1000] and a target in [-5000, 5000].'''
__lowerCamelCase = [randint(-1000 , 1000 ) for i in range(10 )]
__lowerCamelCase = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase_ = make_dataset()
def lowerCamelCase__ ( A__ : list[int] , A__ : int ):
'''Naive search: try every 3-permutation of the array for one summing to the target.'''
for triplet in permutations(A__ , 3 ):
if sum(A__ ) == target:
return tuple(sorted(A__ ) )
return (0, 0, 0)
def lowerCamelCase__ ( A__ : list[int] , A__ : int ):
'''Two-pointer search over the sorted array: O(n^2) instead of O(n^3).'''
arr.sort()
__lowerCamelCase = len(A__ )
for i in range(n - 1 ):
__lowerCamelCase, __lowerCamelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowerCamelCase__ ( ):
'''Time both implementations with timeit.repeat and return the best run of each.'''
__lowerCamelCase = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
__lowerCamelCase = """
triplet_sum1(*dataset)
"""
__lowerCamelCase = """
triplet_sum2(*dataset)
"""
__lowerCamelCase = repeat(setup=A__ , stmt=A__ , repeat=5 , number=10000 )
__lowerCamelCase = repeat(setup=A__ , stmt=A__ , repeat=5 , number=10000 )
return (min(A__ ), min(A__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 80 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
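# Hedged usage sketch of the registry exercised above (get_activation is the
# public transformers API; tensor values below are illustrative):
# act = get_activation("gelu")
# act(torch.tensor([-1.0, 0.0, 1.0])) -> tensor([-0.1587, 0.0000, 0.8413])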
| 80 | 1 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''Project Euler 10: return the sum of all primes below n, computed with a sieve.'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
| 80 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : BigBirdConfig
UpperCAmelCase__ : jnp.dtype = jnp.floataa
UpperCAmelCase__ : bool = True
def lowerCAmelCase__ ( self: List[str] ):
super().setup()
__lowerCamelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self: Optional[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = super().__call__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[Any] = FlaxBigBirdForNaturalQuestionsModule
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Union[str, Any] , A__ : Optional[Any] , A__ : str , A__ : List[str] , A__ : List[str] ):
'''Mean of the cross-entropy losses over start, end and pooled (category) logits.'''
def cross_entropy(A__ : Dict , A__ : List[str] , A__ : Tuple=None ):
__lowerCamelCase = logits.shape[-1]
__lowerCamelCase = (labels[..., None] == jnp.arange(A__ )[None]).astype("""f4""" )
__lowerCamelCase = jax.nn.log_softmax(A__ , axis=-1 )
__lowerCamelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__lowerCamelCase = reduction(A__ )
return loss
__lowerCamelCase = partial(A__ , reduction=jnp.mean )
__lowerCamelCase = cross_entropy(A__ , A__ )
__lowerCamelCase = cross_entropy(A__ , A__ )
__lowerCamelCase = cross_entropy(A__ , A__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = "google/bigbird-roberta-base"
UpperCAmelCase__ : int = 3000
UpperCAmelCase__ : int = 1_0500
UpperCAmelCase__ : int = 128
UpperCAmelCase__ : int = 3
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 5
# tx_args
UpperCAmelCase__ : float = 3E-5
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 2_0000
UpperCAmelCase__ : float = 0.00_95
UpperCAmelCase__ : str = "bigbird-roberta-natural-questions"
UpperCAmelCase__ : str = "training-expt"
UpperCAmelCase__ : str = "data/nq-training.jsonl"
UpperCAmelCase__ : str = "data/nq-validation.jsonl"
def lowerCAmelCase__ ( self: Optional[int] ):
os.makedirs(self.base_dir , exist_ok=UpperCamelCase_ )
__lowerCamelCase = os.path.join(self.base_dir , self.save_dir )
__lowerCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : int
UpperCAmelCase__ : int = 4096 # no dynamic padding on TPUs
def __call__( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
__lowerCamelCase = self.collate_fn(UpperCamelCase_ )
__lowerCamelCase = jax.tree_util.tree_map(UpperCamelCase_ , UpperCamelCase_ )
return batch
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ):
__lowerCamelCase, __lowerCamelCase = self.fetch_inputs(features["""input_ids"""] )
__lowerCamelCase = {
"""input_ids""": jnp.array(UpperCamelCase_ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(UpperCamelCase_ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def lowerCAmelCase__ ( self: str , UpperCamelCase_: list ):
__lowerCamelCase = [self._fetch_inputs(UpperCamelCase_ ) for ids in input_ids]
return zip(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: list ):
__lowerCamelCase = [1 for _ in range(len(UpperCamelCase_ ) )]
while len(UpperCamelCase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def lowerCamelCase__ ( A__ : List[Any] , A__ : Optional[Any] , A__ : List[Any]=None ):
'''Optionally shuffle, then yield the dataset in fixed-size batches.'''
if seed is not None:
__lowerCamelCase = dataset.shuffle(seed=A__ )
for i in range(len(A__ ) // batch_size ):
__lowerCamelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(A__ )
@partial(jax.pmap , axis_name="""batch""" )
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict , **A__ : Any ):
'''pmapped train step: compute loss and grads, average across devices, apply the update.'''
def loss_fn(A__ : Optional[int] ):
__lowerCamelCase = model_inputs.pop("""start_labels""" )
__lowerCamelCase = model_inputs.pop("""end_labels""" )
__lowerCamelCase = model_inputs.pop("""pooled_labels""" )
__lowerCamelCase = state.apply_fn(**A__ , params=A__ , dropout_rng=A__ , train=A__ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = outputs
return state.loss_fn(
A__ , A__ , A__ , A__ , A__ , A__ , )
__lowerCamelCase, __lowerCamelCase = jax.random.split(A__ )
__lowerCamelCase = jax.value_and_grad(A__ )
__lowerCamelCase, __lowerCamelCase = grad_fn(state.params )
__lowerCamelCase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
__lowerCamelCase = jax.lax.pmean(A__ , """batch""" )
__lowerCamelCase = state.apply_gradients(grads=A__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def lowerCamelCase__ ( A__ : Dict , **A__ : Tuple ):
'''pmapped eval step: forward pass and device-averaged loss.'''
__lowerCamelCase = model_inputs.pop("""start_labels""" )
__lowerCamelCase = model_inputs.pop("""end_labels""" )
__lowerCamelCase = model_inputs.pop("""pooled_labels""" )
__lowerCamelCase = state.apply_fn(**A__ , params=state.params , train=A__ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = outputs
__lowerCamelCase = state.loss_fn(A__ , A__ , A__ , A__ , A__ , A__ )
__lowerCamelCase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class lowerCamelCase__( train_state.TrainState):
UpperCAmelCase__ : Callable = struct.field(pytree_node=__lowerCamelCase)
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : Args
UpperCAmelCase__ : Callable
UpperCAmelCase__ : Callable
UpperCAmelCase__ : Callable
UpperCAmelCase__ : Callable
UpperCAmelCase__ : wandb
UpperCAmelCase__ : Callable = None
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any]=None ):
__lowerCamelCase = model.params
__lowerCamelCase = TrainState.create(
apply_fn=model.__call__ , params=UpperCamelCase_ , tx=UpperCamelCase_ , loss_fn=UpperCamelCase_ , )
if ckpt_dir is not None:
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = restore_checkpoint(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
__lowerCamelCase, __lowerCamelCase = build_tx(**UpperCamelCase_ )
__lowerCamelCase = train_state.TrainState(
step=UpperCamelCase_ , apply_fn=model.__call__ , params=UpperCamelCase_ , tx=UpperCamelCase_ , opt_state=UpperCamelCase_ , )
__lowerCamelCase = args
__lowerCamelCase = data_collator
__lowerCamelCase = lr
__lowerCamelCase = params
__lowerCamelCase = jax_utils.replicate(UpperCamelCase_ )
return state
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.args
__lowerCamelCase = len(UpperCamelCase_ ) // args.batch_size
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = jax.random.split(UpperCamelCase_ , jax.device_count() )
for epoch in range(args.max_epochs ):
__lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
__lowerCamelCase = get_batched_dataset(UpperCamelCase_ , args.batch_size , seed=UpperCamelCase_ )
__lowerCamelCase = 0
for batch in tqdm(UpperCamelCase_ , total=UpperCamelCase_ , desc=F'Running EPOCH-{epoch}' ):
__lowerCamelCase = self.data_collator(UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.train_step_fn(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
__lowerCamelCase = jax_utils.unreplicate(state.step )
__lowerCamelCase = running_loss.item() / i
__lowerCamelCase = self.scheduler_fn(state_step - 1 )
__lowerCamelCase = self.evaluate(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(UpperCamelCase_ ) )
self.logger.log(UpperCamelCase_ , commit=UpperCamelCase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: List[Any] ):
__lowerCamelCase = get_batched_dataset(UpperCamelCase_ , self.args.batch_size )
__lowerCamelCase = len(UpperCamelCase_ ) // self.args.batch_size
__lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
__lowerCamelCase = 0
for batch in tqdm(UpperCamelCase_ , total=UpperCamelCase_ , desc="""Evaluating ... """ ):
__lowerCamelCase = self.data_collator(UpperCamelCase_ )
__lowerCamelCase = self.val_step_fn(UpperCamelCase_ , **UpperCamelCase_ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = jax_utils.unreplicate(UpperCamelCase_ )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """ )
self.model_save_fn(UpperCamelCase_ , params=state.params )
with open(os.path.join(UpperCamelCase_ , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(UpperCamelCase_ , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(UpperCamelCase_ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase_ , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , UpperCamelCase_ )
print("""DONE""" )
def lowerCamelCase__ ( A__ : str , A__ : List[str] ):
'''Restore params, optimizer state, step count, args and data collator from a checkpoint dir.'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(A__ , """flax_model.msgpack""" ) , """rb""" ) as f:
__lowerCamelCase = from_bytes(state.params , f.read() )
with open(os.path.join(A__ , """opt_state.msgpack""" ) , """rb""" ) as f:
__lowerCamelCase = from_bytes(state.opt_state , f.read() )
__lowerCamelCase = joblib.load(os.path.join(A__ , """args.joblib""" ) )
__lowerCamelCase = joblib.load(os.path.join(A__ , """data_collator.joblib""" ) )
with open(os.path.join(A__ , """training_state.json""" ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
__lowerCamelCase = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : List[str] , A__ : List[Any] ):
'''Linear warmup from init_lr to lr, then linear decay down to 1e-7.'''
__lowerCamelCase = num_train_steps - warmup_steps
__lowerCamelCase = optax.linear_schedule(init_value=A__ , end_value=A__ , transition_steps=A__ )
__lowerCamelCase = optax.linear_schedule(init_value=A__ , end_value=1E-7 , transition_steps=A__ )
__lowerCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
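# Hedged sketch of the joined schedule above (standard optax semantics; values
# illustrative): with init_lr=0.0, lr=3e-5, warmup_steps=100 and
# num_train_steps=1000, the rate ramps linearly 0.0 -> 3e-5 over steps
# [0, 100), then decays linearly 3e-5 -> 1e-7 over the remaining 900 steps.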
def lowerCamelCase__ ( A__ : Any , A__ : Union[str, Any] , A__ : Dict , A__ : Optional[Any] , A__ : List[Any] ):
'''Build an AdamW optimizer (with weight-decay masking) and its learning-rate schedule.'''
def weight_decay_mask(A__ : str ):
__lowerCamelCase = traverse_util.flatten_dict(A__ )
__lowerCamelCase = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(A__ )
__lowerCamelCase = scheduler_fn(A__ , A__ , A__ , A__ )
__lowerCamelCase = optax.adamw(learning_rate=A__ , weight_decay=A__ , mask=A__ )
return tx, lr
| 80 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
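# Hedged usage sketch (class/method names as in the upstream "count islands"
# script are assumed): 8-directional flood fill merges diagonal neighbours.
# grid = [[1, 1, 0, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1]]
# Graph(3, 4, grid).count_islands() -> 2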
| 80 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( A__ : Tuple ):
'''Build a VideoMAEConfig for the model name, adding id2label for fine-tuned checkpoints.'''
__lowerCamelCase = VideoMAEConfig()
set_architecture_configs(A__ , A__ )
if "finetuned" not in model_name:
__lowerCamelCase = False
if "finetuned" in model_name:
__lowerCamelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCamelCase = 400
__lowerCamelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCamelCase = 174
__lowerCamelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Optional[Any] ):
'''Set hidden sizes, depths and decoder dimensions from the size hint in the model name.'''
if "small" in model_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = 192
__lowerCamelCase = 768
elif "large" in model_name:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 512
__lowerCamelCase = 2048
elif "huge" in model_name:
__lowerCamelCase = 1280
__lowerCamelCase = 5120
__lowerCamelCase = 32
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 640
__lowerCamelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def lowerCamelCase__ ( A__ : str ):
'''Map an original checkpoint key to its Hugging Face VideoMAE equivalent.'''
if "encoder." in name:
__lowerCamelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCamelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCamelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCamelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCamelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCamelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""head""" , """classifier""" )
return name
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[Any] ):
'''Rename keys and split each fused qkv matrix into separate query/key/value tensors.'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(A__ )
if key.startswith("""encoder.""" ):
__lowerCamelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCamelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCamelCase = config.decoder_hidden_size
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = config.hidden_size
__lowerCamelCase = int(key_split[1] )
__lowerCamelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val
return orig_state_dict
def lowerCamelCase__ ( ):
'''Download the spaghetti-eating test video and return it as a list of frames.'''
__lowerCamelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCamelCase = np.load(A__ )
return list(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Tuple , A__ : Union[str, Any] , A__ : int ):
'''Convert an original VideoMAE checkpoint, verify its outputs, and optionally save/push it.'''
__lowerCamelCase = get_videomae_config(A__ )
if "finetuned" in model_name:
__lowerCamelCase = VideoMAEForVideoClassification(A__ )
else:
__lowerCamelCase = VideoMAEForPreTraining(A__ )
# download original checkpoint, hosted on Google Drive
__lowerCamelCase = """pytorch_model.bin"""
gdown.cached_download(A__ , A__ , quiet=A__ )
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
if "model" in files:
__lowerCamelCase = files["""model"""]
else:
__lowerCamelCase = files["""module"""]
__lowerCamelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
model.eval()
# verify model on basic input
__lowerCamelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCamelCase = prepare_video()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCamelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCamelCase = torch.load(A__ )
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits
__lowerCamelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCamelCase = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , A__ , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , A__ , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCamelCase = outputs.loss
assert torch.allclose(A__ , A__ , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
model.save_pretrained(A__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(A__ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''Build a DPTConfig (and the expected output shape) from hints in the checkpoint URL.'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''Drop pretrained-head keys that have no Hugging Face counterpart.'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''Map an original DPT checkpoint key to its Hugging Face equivalent.'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''Split each fused qkv projection into separate query/key/value weights and biases.'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''Download and return the COCO test image of two cats.'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''Convert an original DPT checkpoint, verify outputs on a test image, and optionally save/push it.'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
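# Hedged note on the lazy-import pattern above (standard transformers
# behavior): importing this module is cheap, and torch-backed classes such as
# MegaModel are only materialized on first attribute access, which _LazyModule
# resolves via a module-level __getattr__.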
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowerCamelCase__:
UpperCAmelCase__ : Any = BlenderbotConfig
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Union[str, Any] = 'gelu'
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=13 , UpperCamelCase_: Any=7 , UpperCamelCase_: Any=True , UpperCamelCase_: Dict=False , UpperCamelCase_: Optional[int]=99 , UpperCamelCase_: Any=32 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=4 , UpperCamelCase_: Tuple=37 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Any=20 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: int=1 , UpperCamelCase_: List[str]=0 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCamelCase = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = TFBlenderbotModel(config=UpperCamelCase_ ).get_decoder()
__lowerCamelCase = inputs_dict["""input_ids"""]
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict["""attention_mask"""][:1, :]
__lowerCamelCase = inputs_dict["""head_mask"""]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def lowerCamelCase__ ( A__ : List[str] , A__ : str , A__ : str , A__ : int=None , A__ : List[Any]=None , A__ : Union[str, Any]=None , A__ : Optional[Any]=None , A__ : Tuple=None , ):
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowerCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
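# The first decoder position (the decoder start token) is always attended to;
# every later position is masked out wherever it equals the pad token id.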
if head_mask is None:
__lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCAmelCase__ : str = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Any = False
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = TFBlenderbotModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_tokenizers
@require_tf
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Dict = ['My friends are cool but they eat too many carbs.']
UpperCAmelCase__ : List[str] = 'facebook/blenderbot-400M-distill'
@cached_property
def lowerCAmelCase__ ( self: int ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.tokenizer(self.src_text , return_tensors="""tf""" )
__lowerCamelCase = self.model.generate(
model_inputs.input_ids , )
__lowerCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
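# These dynamic ONNX axes let the exported graph accept any batch size and
# sequence length (plus any number of choices for the multiple-choice head).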
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 80 | 1 |
from math import isqrt, loga
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , A__ , A__ ):
__lowerCamelCase = False
return [i for i in range(2 , A__ ) if is_prime[i]]
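# Quick sanity check: calculate_prime_numbers(10) returns [2, 3, 5, 7],
# since the sieve has marked 4, 6, 8 and 9 as composite.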
def lowerCamelCase__ ( A__ : int = 800800 , A__ : int = 800800 ):
'''simple docstring'''
__lowerCamelCase = degree * loga(A__ )
__lowerCamelCase = int(A__ )
__lowerCamelCase = calculate_prime_numbers(A__ )
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = len(A__ ) - 1
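# Two-pointer scan over the sorted primes p (left) and q (right): p**q * q**p
# is compared against the bound in log space, i.e. q*log2(p) + p*log2(q)
# versus upper_bound, to avoid forming the astronomically large products.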
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the smallest integer greater than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
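# b_estimate solves T(b) = target / triangle_a, i.e. b*(b+1)/2 = target/triangle_a,
# taking the positive root of b**2 + b - 2*target/triangle_a = 0.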
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
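# Flax blocks run channels-last (NHWC), so the UNet skip connection is
# concatenated along the final axis rather than axis 1 as in the PyTorch blocks.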
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCAmelCase_ = logging.getLogger(__name__)
class lowerCamelCase__:
def __init__( self: str ):
__lowerCamelCase = False
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ):
if not self.initialized:
__lowerCamelCase = RagRetriever(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
__lowerCamelCase = True
def lowerCAmelCase__ ( self: Optional[Any] ):
self.retriever.index.init_index()
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.retriever._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return doc_ids, retrieved_doc_embeds
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any=None ):
if index is not None and index.is_initialized() and len(UpperCamelCase_ ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
__lowerCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
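# Build a RagRetriever inside every Ray actor up front; retrieve() calls can
# then be dispatched to whichever worker happens to be picked at query time.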
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
for worker in self.retrieval_workers
] )
def lowerCAmelCase__ ( self: List[str] ):
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowerCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowerCamelCase, __lowerCamelCase = ray.get(random_worker.retrieve.remote(UpperCamelCase_ , UpperCamelCase_ ) )
else:
__lowerCamelCase, __lowerCamelCase = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple=None , **UpperCamelCase_: int ):
return super(UpperCamelCase_ , cls ).get_tokenizers(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: int=None , **UpperCamelCase_: int ):
__lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ ) or RagConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = RagTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
__lowerCamelCase = rag_tokenizer.question_encoder
__lowerCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowerCamelCase = """custom"""
__lowerCamelCase = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase_ )
else:
__lowerCamelCase = cls._build_index(UpperCamelCase_ )
return cls(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , retrieval_workers=UpperCamelCase_ , index=UpperCamelCase_ , )
| 80 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape  # (vocab_size, emb_size)
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )  # projection back onto the vocabulary
__lowerCamelCase = emb.weight.data  # tie the linear weights to the embedding matrix
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = 'pegasus'
UpperCAmelCase__ : Optional[int] = ['past_key_values']
UpperCAmelCase__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: Tuple , UpperCamelCase_: List[Any]=5_02_65 , UpperCamelCase_: Any=10_24 , UpperCamelCase_: str=12 , UpperCamelCase_: Dict=40_96 , UpperCamelCase_: List[str]=16 , UpperCamelCase_: int=12 , UpperCamelCase_: Union[str, Any]=40_96 , UpperCamelCase_: int=16 , UpperCamelCase_: int=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple="gelu" , UpperCamelCase_: List[str]=10_24 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: List[str]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: int=False , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: Tuple=1 , UpperCamelCase_: Any=1 , **UpperCamelCase_: Dict , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return self.d_model
| 80 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
__lowerCamelCase,
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , UpperCamelCase_: int = 1_01 ):
__lowerCamelCase = length
def __len__( self: Dict ):
return self.length
def __getitem__( self: Optional[Any] , UpperCamelCase_: Dict ):
return i
class lowerCamelCase__:
def __call__( self: Dict , UpperCamelCase_: List[str] ):
return {"input_ids": torch.tensor(UpperCamelCase_ ), "labels": torch.tensor(UpperCamelCase_ )}
class lowerCamelCase__( nn.Module):
def __init__( self: str ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__lowerCamelCase = nn.Linear(1_20 , 80 )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
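# The dummy model returns a constant zero loss (when labels are given) and
# echoes its input ids, which is enough for the Trainer to exercise
# distributed gathering of predictions.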
class lowerCamelCase__( __lowerCamelCase):
@require_torch_neuroncore
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = F'--output_dir {output_dir}'.split()
__lowerCamelCase = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCamelCase__( __lowerCamelCase):
@require_torch_multi_gpu
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = F'--output_dir {output_dir}'.split()
__lowerCamelCase = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
UpperCAmelCase_ = HfArgumentParser((TrainingArguments,))
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
UpperCAmelCase_ = DummyDataset(dataset_length)
def lowerCamelCase__ ( A__ : EvalPrediction ):
'''simple docstring'''
__lowerCamelCase = list(range(len(A__ ) ) )
__lowerCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
UpperCAmelCase_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
UpperCAmelCase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase_ = 2
UpperCAmelCase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase_ = None
| 80 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1  # 0 is not prime
__lowerCamelCase = 1  # 1 is not prime
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
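# Example: for n = 10 the sieve keeps 2, 3, 5 and 7, so the returned sum of
# all primes below n is 17.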
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |
def lowerCamelCase__ ( A__ : str , A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[] for _ in range(A__ )]
__lowerCamelCase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(A__ ) <= key:
return input_string
for position, character in enumerate(A__ ):
__lowerCamelCase = position % (lowest * 2) # puts it in bounds
__lowerCamelCase = min(A__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(A__ )
__lowerCamelCase = ["""""".join(A__ ) for row in temp_grid]
__lowerCamelCase = """""".join(A__ )
return output_string
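# Example: encrypt("Hello World", 4) -> "HWe olordll" (the zigzag rows, read
# top to bottom, are "HW", "e o", "lord" and "ll"); decrypt reverses this.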
def lowerCamelCase__ ( A__ : str , A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
__lowerCamelCase = [[] for _ in range(A__ )] # generates template
for position in range(len(A__ ) ):
__lowerCamelCase = position % (lowest * 2) # puts it in bounds
__lowerCamelCase = min(A__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
__lowerCamelCase = 0
for row in temp_grid: # fills in the characters
__lowerCamelCase = input_string[counter : counter + len(A__ )]
grid.append(list(A__ ) )
counter += len(A__ )
__lowerCamelCase = """""" # reads as zigzag
for position in range(len(A__ ) ):
__lowerCamelCase = position % (lowest * 2) # puts it in bounds
__lowerCamelCase = min(A__ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = {}
for key_guess in range(1 , len(A__ ) ): # tries every key
__lowerCamelCase = decrypt(A__ , A__ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
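# With k cached eps estimates, apply the order-k Adams-Bashforth multistep
# coefficients; e.g. (55, -59, 37, -9) / 24 gives the fourth-order update.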
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
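The `len(self.ets)` branch in the step method above is a 1st- to 4th-order linear multistep (Adams-Bashforth) update; a minimal sketch of the same coefficient ladder on plain floats, with the history values assumed for illustration:
def multistep_estimate(ets):
    # Mirrors the scheduler's branch: the order grows with the history length.
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

print(multistep_estimate([1.0, 1.1, 1.2, 1.3]))  # four values in history -> 4th-order branch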
| 80 | 1 |
import operator as op
UpperCAmelCase_ = 'scaler.pt'
UpperCAmelCase_ = 'pytorch_model'
UpperCAmelCase_ = 'random_states'
UpperCAmelCase_ = 'optimizer'
UpperCAmelCase_ = 'scheduler'
UpperCAmelCase_ = 'pytorch_model.bin'
UpperCAmelCase_ = 'pytorch_model.bin.index.json'
UpperCAmelCase_ = 'model.safetensors'
UpperCAmelCase_ = 'model.safetensors.index.json'
UpperCAmelCase_ = '1.10.2'
UpperCAmelCase_ = 'py38'
UpperCAmelCase_ = '4.17.0'
UpperCAmelCase_ = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCAmelCase_ = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCAmelCase_ = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCAmelCase_ = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCAmelCase_ = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCAmelCase_ = '2.0.1'
UpperCAmelCase_ = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCAmelCase_ = ['default', 'reduce-overhead', 'max-autotune']
UpperCAmelCase_ = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase_ = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCAmelCase_ = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCAmelCase_ = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
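The string-to-operator table defined above is typically used for version gating; a minimal sketch of that pattern (the helper name and its usage are assumptions, not taken from the source):
import operator as op

ops = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(version, operation, reference):
    # Dispatch on the operation string instead of chaining if/elif.
    return ops[operation](version, reference)

print(compare_versions((2, 0, 1), ">=", (1, 10, 2)))  # True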
| 80 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
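`md_prefix` above either opens a new `##` section (depth 0) or emits an indented bullet; checking it in isolation:
def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"

print(repr(md_prefix(0)))  # '\n##'  -> a new section header
print(repr(md_prefix(2)))  # '    *' -> a bullet nested two levels deep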
| 80 | 1 |
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int("""1""" + """0""" * digit_len )
for num in range(A__ , A__ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(A__ , A__ ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def lowerCamelCase__ ( A__ : int = 2 ):
'''simple docstring'''
__lowerCamelCase = 1.0
for fraction in fraction_list(A__ ):
__lowerCamelCase = Fraction(A__ )
result *= frac.denominator / frac.numerator
return int(A__ )
if __name__ == "__main__":
print(solution())
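The predicate above encodes the "naive cancelling" trick of Project Euler 33 (e.g. 49/98 equals 4/8 after striking the nines); a quick check, with the de-obfuscated name assumed from the source:
def is_digit_cancelling(num, den):
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den

print(is_digit_cancelling(49, 98))  # True: 4/8 == 49/98 == 0.5
print(is_digit_cancelling(30, 50))  # False: the comparison short-circuits, tens digits don't match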
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'big_bird'
def __init__( self: Tuple , UpperCamelCase_: str=5_03_58 , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[Any]=30_72 , UpperCamelCase_: Union[str, Any]="gelu_new" , UpperCamelCase_: str=0.1 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: str=40_96 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: Any=1E-12 , UpperCamelCase_: Tuple=True , UpperCamelCase_: str=0 , UpperCamelCase_: str=1 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=66 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Any=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=64 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=None , **UpperCamelCase_: Tuple , ):
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = use_cache
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: List[Any] ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
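For reference, the ONNX export inputs declared above expand to the following mapping in the default (non multiple-choice) case:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
print(inputs)  # both tensors are dynamic along batch and sequence length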
| 80 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
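The `hidden_size` computed near the end doubles the embedding width once per stage transition; with the defaults from the signature above:
embed_dim = 96
depths = [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768: 96 * 2**3 after three stage transitions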
| 80 | 1 |
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return str(A__ ) == str(A__ )[::-1]
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return int(A__ ) + int(str(A__ )[::-1] )
def lowerCamelCase__ ( A__ : int = 10000 ):
'''simple docstring'''
__lowerCamelCase = []
for num in range(1 , A__ ):
__lowerCamelCase = 0
__lowerCamelCase = num
while iterations < 50:
__lowerCamelCase = sum_reverse(A__ )
iterations += 1
if is_palindrome(A__ ):
break
else:
lychrel_nums.append(A__ )
return len(A__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[index1] > array[index2]) or (
direction == 0 and array[index1] < array[index2]
):
array[index1], array[index2] = array[index2], array[index1]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
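Bitonic sort only works on sequences whose length is a power of two, which the interactive driver above does not check; the compare-and-swap primitive can be sanity-checked on its own:
def comp_and_swap(array, index1, index2, direction):
    # direction 1 = ascending, 0 = descending, matching the function above
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]

a = [5, 2]
comp_and_swap(a, 0, 1, 1)
print(a)  # [2, 5]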
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
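The `_import_structure` dictionary above defers heavy imports until an attribute is first touched; a minimal sketch of the same idiom with a toy module (this illustrates the pattern only, not transformers' actual `_LazyModule`):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the module that really defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. on first access.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = TinyLazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is imported only on this first access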
| 80 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
UpperCAmelCase_ = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Union[str, Any] = SqueezeBertTokenizer
def __init__( self: Optional[Any] , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None , UpperCamelCase_: Dict=True , UpperCamelCase_: Dict="[UNK]" , UpperCamelCase_: Any="[SEP]" , UpperCamelCase_: Optional[Any]="[PAD]" , UpperCamelCase_: int="[CLS]" , UpperCamelCase_: Any="[MASK]" , UpperCamelCase_: int=True , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: List[str] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCamelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase_ ) != tokenize_chinese_chars
):
__lowerCamelCase = getattr(UpperCamelCase_ , normalizer_state.pop("""type""" ) )
__lowerCamelCase = do_lower_case
__lowerCamelCase = strip_accents
__lowerCamelCase = tokenize_chinese_chars
__lowerCamelCase = normalizer_class(**UpperCamelCase_ )
__lowerCamelCase = do_lower_case
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
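The token-type method above assigns segment id 0 to `[CLS] A [SEP]` and 1 to `B [SEP]` for sequence pairs; working the lengths by hand (the token ids here are made up for illustration):
cls, sep = [101], [102]      # hypothetical special-token ids
seq_a, seq_b = [7, 8, 9], [4, 5]
type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
print(type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]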
| 80 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input existed: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while unprocessed input existed: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
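The `_forward` truncation above keeps only the most recent `max_length - minimum_tokens` tokens of history so the model still has room to generate; the same arithmetic on plain lists:
max_length, minimum_tokens = 20, 10
input_ids = list(range(15))            # 15 tokens of conversation history
n = len(input_ids)
if max_length - minimum_tokens < n:
    input_ids = input_ids[-(max_length - minimum_tokens):]
print(len(input_ids))  # 10: the 5 oldest tokens were dropped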
| 80 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase__ ( A__ : str , A__ : complex , A__ : str = "x" , A__ : float = 10**-10 , A__ : int = 1 , ):
'''simple docstring'''
__lowerCamelCase = symbols(A__ )
__lowerCamelCase = lambdify(A__ , A__ )
__lowerCamelCase = lambdify(A__ , diff(A__ , A__ ) )
__lowerCamelCase = starting_point
while True:
if diff_function(A__ ) != 0:
__lowerCamelCase = prev_guess - multiplicity * func(A__ ) / diff_function(
A__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__lowerCamelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 80 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
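The `t` computation inside the segmented loop above finds the first multiple of a prime at or beyond the segment start; by hand for `each = 7`, `low = 100`:
import math

each, low = 7, 100
t = math.floor(low / each) * each   # 98, the largest multiple of 7 <= 100
if t < low:
    t += each                        # 105, the first multiple of 7 inside the segment
print(t)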
| 80 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = 10
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = [1, 2, 3, 4]
__lowerCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """"""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
__lowerCamelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = ["""It was the best of times."""]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 1_01
__lowerCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowerCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowerCamelCase = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
| 80 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase = mock.Mock()
__lowerCamelCase = 5_00
__lowerCamelCase = {}
__lowerCamelCase = HTTPError
__lowerCamelCase = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase_ ) as mock_head:
__lowerCamelCase = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self: Dict ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase__ ( self: Union[str, Any] ):
with self.assertRaises(UpperCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
__lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(UpperCamelCase_ )
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Tuple ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] ):
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ViTImageProcessor.from_pretrained(UpperCamelCase_ )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
__lowerCamelCase = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase_ , repo_id="""test-image-processor""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = ViTImageProcessor.from_pretrained(UpperCamelCase_ )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
__lowerCamelCase = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
CustomImageProcessor.register_for_auto_class()
__lowerCamelCase = CustomImageProcessor.from_pretrained(UpperCamelCase_ )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
__lowerCamelCase = AutoImageProcessor.from_pretrained(
F'{USER}/test-dynamic-image-processor' , trust_remote_code=UpperCamelCase_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 80 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[str] = ['image_processor', 'tokenizer']
UpperCAmelCase__ : int = 'BlipImageProcessor'
UpperCAmelCase__ : Optional[int] = 'AutoTokenizer'
def __init__( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
# add QFormer tokenizer
__lowerCamelCase = qformer_tokenizer
def __call__( self: Any , UpperCamelCase_: ImageInput = None , UpperCamelCase_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Union[bool, str, TruncationStrategy] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[str, TensorType]] = None , **UpperCamelCase_: str , ):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
__lowerCamelCase = BatchFeature()
if text is not None:
__lowerCamelCase = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
encoding.update(UpperCamelCase_ )
__lowerCamelCase = self.qformer_tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = qformer_text_encoding.pop("""input_ids""" )
__lowerCamelCase = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
encoding.update(UpperCamelCase_ )
return encoding
def lowerCAmelCase__ ( self: Dict , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: str ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: List[str] ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.tokenizer.model_input_names
__lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[str] , **UpperCamelCase_: int ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCamelCase_ )
return super().save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: int , UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Tuple ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , subfolder="""qformer_tokenizer""" )
__lowerCamelCase = cls._get_arguments_from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
args.append(UpperCamelCase_ )
return cls(*UpperCamelCase_ )
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ):
'''simple docstring'''
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 80 | 1 |
from __future__ import annotations
import math
class lowerCamelCase__:
def __init__( self: Optional[Any] , UpperCamelCase_: int ):
__lowerCamelCase = size
# approximate the overall size of segment tree with given value
__lowerCamelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__lowerCamelCase = [0 for i in range(0 , 4 * size )]
__lowerCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int ):
return idx * 2
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int ):
return idx * 2 + 1
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[int] ):
if left_element == right_element:
__lowerCamelCase = a[left_element - 1]
else:
__lowerCamelCase = (left_element + right_element) // 2
self.build(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.build(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = max(
self.segment_tree[self.left(UpperCamelCase_ )] , self.segment_tree[self.right(UpperCamelCase_ )] )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.flag[idx] is True:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = False
if left_element != right_element:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = True
__lowerCamelCase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__lowerCamelCase = val
if left_element != right_element:
__lowerCamelCase = val
__lowerCamelCase = val
__lowerCamelCase = True
__lowerCamelCase = True
return True
__lowerCamelCase = (left_element + right_element) // 2
self.update(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.update(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = max(
self.segment_tree[self.left(UpperCamelCase_ )] , self.segment_tree[self.right(UpperCamelCase_ )] )
return True
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.flag[idx] is True:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = False
if left_element != right_element:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = True
__lowerCamelCase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__lowerCamelCase = (left_element + right_element) // 2
__lowerCamelCase = self.query(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.query(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return max(UpperCamelCase_ , UpperCamelCase_ )
def __str__( self: Optional[int] ):
return str([self.query(1 , 1 , self.size , UpperCamelCase_ , UpperCamelCase_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
UpperCAmelCase_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
UpperCAmelCase_ = 15
UpperCAmelCase_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 80 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
| 80 | 1 |
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = name
__lowerCamelCase = val
def __str__( self: str ):
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self: Optional[int] , UpperCamelCase_: Any ):
return self.val < other.val
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = self.build_heap(UpperCamelCase_ )
def __getitem__( self: Optional[int] , UpperCamelCase_: Union[str, Any] ):
return self.get_value(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
return (idx - 1) // 2
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return idx * 2 + 1
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any ):
return idx * 2 + 2
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
return self.heap_dict[key]
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = len(UpperCamelCase_ ) - 1
__lowerCamelCase = self.get_parent_idx(UpperCamelCase_ )
for idx, i in enumerate(UpperCamelCase_ ):
__lowerCamelCase = idx
__lowerCamelCase = i.val
for i in range(UpperCamelCase_ , -1 , -1 ):
self.sift_down(UpperCamelCase_ , UpperCamelCase_ )
return array
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple ):
while True:
__lowerCamelCase = self.get_left_child_idx(UpperCamelCase_ ) # noqa: E741
__lowerCamelCase = self.get_right_child_idx(UpperCamelCase_ )
__lowerCamelCase = idx
if l < len(UpperCamelCase_ ) and array[l] < array[idx]:
__lowerCamelCase = l
if r < len(UpperCamelCase_ ) and array[r] < array[smallest]:
__lowerCamelCase = r
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
__lowerCamelCase = smallest
else:
break
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ):
__lowerCamelCase = self.get_parent_idx(UpperCamelCase_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
__lowerCamelCase, __lowerCamelCase = self.heap[idx], self.heap[p]
__lowerCamelCase, __lowerCamelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
__lowerCamelCase = p
__lowerCamelCase = self.get_parent_idx(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
return self.heap[0]
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase, __lowerCamelCase = self.heap[-1], self.heap[0]
__lowerCamelCase, __lowerCamelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
__lowerCamelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
self.heap.append(UpperCamelCase_ )
__lowerCamelCase = len(self.heap ) - 1
__lowerCamelCase = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCAmelCase__ ( self: List[str] ):
return len(self.heap ) == 0
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Dict ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
__lowerCamelCase = new_value
__lowerCamelCase = new_value
self.sift_up(self.idx_of_element[node] )
UpperCAmelCase_ = Node('R', -1)
UpperCAmelCase_ = Node('B', 6)
UpperCAmelCase_ = Node('A', 3)
UpperCAmelCase_ = Node('X', 1)
UpperCAmelCase_ = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
UpperCAmelCase_ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
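# Note on the label preparation above: pad positions in the labels are
# replaced with -100 because PyTorch's cross-entropy loss ignores targets
# equal to -100, so padding never contributes to the training loss.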
| 80 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
UpperCAmelCase_ = '1'
UpperCAmelCase_ = '0'
UpperCAmelCase_ = '1'
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
UpperCAmelCase_ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
UpperCAmelCase_ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
UpperCAmelCase_ = ort.RunOptions()
UpperCAmelCase_ = 128
UpperCAmelCase_ = 1
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = 2_000
UpperCAmelCase_ = {}
for iter in range(max_iters):
UpperCAmelCase_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
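# Timing note: the separate warm-up run above is excluded from the measured
# loop; the reported figure divides the total wall-clock time of the 2,000
# timed runs, in milliseconds.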
| 80 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
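# A minimal usage sketch (hypothetical names: the obfuscated class and
# methods above correspond to a Graph with an 8-directional DFS and a
# count_islands() entry point):
#
#   grid = [[1, 1, 0, 0],
#           [0, 1, 0, 0],
#           [0, 0, 0, 1]]
#   g = Graph(3, 4, grid)
#   print(g.count_islands())  # 2: the top-left blob and the lone cell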
| 80 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
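# Example invocation (script name assumed; the positional and optional
# arguments follow the argparse definitions above):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn \
#       --hf_config facebook/bart-large-cnn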
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
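# Example invocation (script name assumed; flags as defined above):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large --model_name dpt-large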
| 80 | 1 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase_ = get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = (
os.path.join(UpperCamelCase_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCamelCase = Extractor
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
__lowerCamelCase = os.path.abspath(UpperCamelCase_ )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: bool ):
return force_extract or (
not os.path.isfile(UpperCamelCase_ ) and not (os.path.isdir(UpperCamelCase_ ) and os.listdir(UpperCamelCase_ ))
)
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
__lowerCamelCase = self.extractor.infer_extractor_format(UpperCamelCase_ )
if not extractor_format:
return input_path
__lowerCamelCase = self._get_output_path(UpperCamelCase_ )
if self._do_extract(UpperCamelCase_ , UpperCamelCase_ ):
self.extractor.extract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return output_path
class lowerCamelCase__( __lowerCamelCase):
@classmethod
@abstractmethod
def lowerCAmelCase__ ( cls: Optional[Any] , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: Optional[int] ):
...
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
...
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[bytes] = []
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ):
with open(UpperCamelCase_ , """rb""" ) as f:
return f.read(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Any , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ):
if not magic_number:
__lowerCamelCase = max(len(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCamelCase = cls.read_magic_number(UpperCamelCase_ , UpperCamelCase_ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
class lowerCamelCase__( __lowerCamelCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: int ):
return tarfile.is_tarfile(UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Tuple , UpperCamelCase_: str ):
def resolved(UpperCamelCase_: str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase_ ) )
def badpath(UpperCamelCase_: str , UpperCamelCase_: str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ).startswith(UpperCamelCase_ )
def badlink(UpperCamelCase_: Optional[int] , UpperCamelCase_: str ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCamelCase = resolved(os.path.join(UpperCamelCase_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase_ )
__lowerCamelCase = resolved(UpperCamelCase_ )
for finfo in members:
if badpath(finfo.name , UpperCamelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(UpperCamelCase_ , UpperCamelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(UpperCamelCase_ , UpperCamelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_ , members=TarExtractor.safemembers(UpperCamelCase_ , UpperCamelCase_ ) )
tar_file.close()
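# Note: safemembers above filters out entries whose resolved paths or link
# targets escape the destination directory, guarding tar extraction against
# path-traversal ("zip slip") archives.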
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = [b'\x1F\x8B']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
with gzip.open(UpperCamelCase_ , """rb""" ) as gzip_file:
with open(UpperCamelCase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ):
if super().is_extractable(UpperCamelCase_ , magic_number=UpperCamelCase_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase_ , """rb""" ) as fp:
__lowerCamelCase = _EndRecData(UpperCamelCase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCamelCase = fp.read(UpperCamelCase_ ) # CD is where we expect it to be
if len(UpperCamelCase_ ) == sizeCentralDir:
__lowerCamelCase = struct.unpack(UpperCamelCase_ , UpperCamelCase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with zipfile.ZipFile(UpperCamelCase_ , """r""" ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
with lzma.open(UpperCamelCase_ ) as compressed_file:
with open(UpperCamelCase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[Any] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = rarfile.RarFile(UpperCamelCase_ )
rf.extractall(UpperCamelCase_ )
rf.close()
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = [b'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__lowerCamelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase_ , """rb""" ) as ifh, open(UpperCamelCase_ , """wb""" ) as ofh:
dctx.copy_stream(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = [b'\x42\x5A\x68']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
with bza.open(UpperCamelCase_ , """rb""" ) as compressed_file:
with open(UpperCamelCase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with pyazr.SevenZipFile(UpperCamelCase_ , """r""" ) as archive:
archive.extractall(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = [b'\x04\x22\x4D\x18']
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(UpperCamelCase_ , """rb""" ) as compressed_file:
with open(UpperCamelCase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__:
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCAmelCase__ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCAmelCase__ ( cls: Union[str, Any] ):
return max(
len(UpperCamelCase_ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase_ , UpperCamelCase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ):
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase_ , magic_number_length=UpperCamelCase_ )
except OSError:
return b""
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bool = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=UpperCamelCase_ , )
__lowerCamelCase = cls.infer_extractor_format(UpperCamelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] ): # <Added version="2.4.0"/>
__lowerCamelCase = cls._get_magic_number_max_length()
__lowerCamelCase = cls._read_magic_number(UpperCamelCase_ , UpperCamelCase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase_ , magic_number=UpperCamelCase_ ):
return extractor_format
@classmethod
def lowerCAmelCase__ ( cls: Tuple , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(UpperCamelCase_ ) , exist_ok=UpperCamelCase_ )
# Prevent parallel extractions
__lowerCamelCase = str(Path(UpperCamelCase_ ).with_suffix(""".lock""" ) )
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_ , ignore_errors=UpperCamelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase_ , UpperCamelCase_ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=UpperCamelCase_ , )
__lowerCamelCase = extractor if extractor != """deprecated""" else extractor_format
else:
__lowerCamelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=UpperCamelCase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase_ ):
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
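# A hedged usage sketch (method names mirror the public `datasets` extract
# API this code is drawn from):
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   if fmt:
#       Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)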
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCamelCase__ ( A__ : Dict=None ):
'''simple docstring'''
if subparsers is not None:
__lowerCamelCase = subparsers.add_parser("""test""" )
else:
__lowerCamelCase = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=A__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
__lowerCamelCase = script_name
else:
__lowerCamelCase = f'--config_file={args.config_file} {script_name}'
__lowerCamelCase = ["""accelerate-launch"""] + test_args.split()
__lowerCamelCase = execute_subprocess_async(A__ , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = test_command_parser()
__lowerCamelCase = parser.parse_args()
test_command(A__ )
if __name__ == "__main__":
main()
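# Typical invocation once installed (this module backs the `accelerate test`
# CLI subcommand):
#   accelerate test --config_file path/to/default_config.yaml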
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
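# A minimal configuration sketch (the obfuscated class above mirrors the
# public transformers BertConfig, whose defaults match the signature shown):
#   config = BertConfig(vocab_size=30522, num_hidden_layers=12)
#   assert config.num_attention_heads == 12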
| 80 | 1 |
from __future__ import annotations
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = str(A__ )
__lowerCamelCase = [n]
for i in range(1 , len(A__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
if len(str(A__ ) ) > 3:
if not is_prime(int(str(A__ )[-3:] ) ) or not is_prime(int(str(A__ )[:3] ) ):
return False
return True
def lowerCamelCase__ ( A__ : int = 11 ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 13
while len(A__ ) != count:
if validate(A__ ):
__lowerCamelCase = list_truncated_nums(A__ )
if all(is_prime(A__ ) for i in list_nums ):
list_truncated_primes.append(A__ )
num += 2
return list_truncated_primes
def lowerCamelCase__ ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
| 80 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the smallest integer greater than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 1 |