Dataset schema:

| column                  | type   | values           |
|-------------------------|--------|------------------|
| code                    | string | 86 – 54.5k chars |
| code_codestyle          | int64  | 0 – 371          |
| style_context           | string | 87 – 49.2k chars |
| style_context_codestyle | int64  | 0 – 349          |
| label                   | int64  | 0 – 1            |
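A minimal sketch of loading and inspecting rows with this schema via `datasets`; the JSONL export path is hypothetical, chosen only to illustrate the column layout above.

```python
# Minimal sketch, assuming the rows below are exported to a local JSONL file
# (the filename "code_style_pairs.jsonl" is an assumption, not part of the dataset).
from datasets import load_dataset

ds = load_dataset("json", data_files="code_style_pairs.jsonl", split="train")
print(ds.features)       # expected: code, code_codestyle, style_context, style_context_codestyle, label
print(ds[0]["label"])    # 0 or 1
```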
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ = { """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""}, """tokenizer_file""": { """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json""" }, } lowerCamelCase_ = {"""mobilebert-uncased""": 5_1_2} lowerCamelCase_ = {} class a_ ( a_ ): '''simple docstring''' __a: Optional[Any] = VOCAB_FILES_NAMES __a: Tuple = PRETRAINED_VOCAB_FILES_MAP __a: List[str] = PRETRAINED_INIT_CONFIGURATION __a: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a: Optional[int] = MobileBertTokenizer def __init__( self , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , lowercase_=True , lowercase_=None , **lowercase_ , ) -> Union[str, Any]: '''simple docstring''' super().__init__( lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , lowercase_ ) != do_lower_case or normalizer_state.get('strip_accents' , lowercase_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , lowercase_ ) != tokenize_chinese_chars ): lowerCAmelCase_ = getattr(lowercase_ , normalizer_state.pop('type' ) ) lowerCAmelCase_ = do_lower_case lowerCAmelCase_ = strip_accents lowerCAmelCase_ = tokenize_chinese_chars lowerCAmelCase_ = normalizer_class(**lowercase_ ) lowerCAmelCase_ = do_lower_case def _lowercase ( self , lowercase_ , lowercase_=None ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]: '''simple docstring''' lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]: '''simple docstring''' lowerCAmelCase_ = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ )
code_codestyle: 14
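The fast tokenizer above maps sentence A (with `[CLS]` and its `[SEP]`) to segment 0 and sentence B (with its `[SEP]`) to segment 1. A self-contained sketch of that rule; the token ids 101/102 are illustrative stand-ins, not taken from the actual vocab:

```python
def build_inputs(ids_a, ids_b=None, cls_id=101, sep_id=102):
    # [CLS] A [SEP] (+ B [SEP] for pairs), mirroring build_inputs_with_special_tokens above.
    out = [cls_id] + ids_a + [sep_id]
    if ids_b:
        out += ids_b + [sep_id]
    return out

def token_type_ids(ids_a, ids_b=None):
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
    n_a = len(ids_a) + 2
    return [0] * n_a if ids_b is None else [0] * n_a + [1] * (len(ids_b) + 1)

assert build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]
```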
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase ( a_ ) -> Any: lowerCAmelCase_ = tmp_path / 'file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = tmp_path / 'malformed_file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ , a_ ) -> List[str]: lowerCAmelCase_ = tmp_path / 'csv_with_image.csv' lowerCAmelCase_ = textwrap.dedent( F'''\ image {image_file} ''' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> int: lowerCAmelCase_ = tmp_path / 'csv_with_label.csv' lowerCAmelCase_ = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv' lowerCAmelCase_ = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]: lowerCAmelCase_ = Csv() lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(a_ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(a_ ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase ( a_ ) -> Optional[Any]: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() lowerCAmelCase_ = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase ( a_ ) -> int: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1:] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() lowerCAmelCase_ = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(a_ ) for label in labels] def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(a_ ) for i in x.split()]} ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) lowerCAmelCase_ = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
style_context_codestyle: 14
label: 1
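The `int_list` test in the context above relies on pandas-style `converters` to parse space-separated integers; a minimal standalone sketch of the same parsing:

```python
import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n7 8 9\n"
df = pd.read_csv(io.StringIO(csv_text),
                 converters={"int_list": lambda x: [int(i) for i in x.split()]})
assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
```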
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Any: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def lowerCamelCase ( a_ , a_ , a_ , a_ , a_=True ) -> Any: model.train() lowerCAmelCase_ = model(a_ ) lowerCAmelCase_ = F.mse_loss(a_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(a_ ) def lowerCamelCase ( a_ , a_=False ) -> str: set_seed(42 ) lowerCAmelCase_ = RegressionModel() lowerCAmelCase_ = deepcopy(a_ ) lowerCAmelCase_ = RegressionDataset(length=80 ) lowerCAmelCase_ = DataLoader(a_ , batch_size=16 ) model.to(accelerator.device ) if sched: lowerCAmelCase_ = AdamW(params=model.parameters() , lr=1e-3 ) lowerCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) lowerCAmelCase_ = LambdaLR(a_ , lr_lambda=lambda a_ : epoch**0.65 ) lowerCAmelCase_ = LambdaLR(a_ , lr_lambda=lambda a_ : epoch**0.65 ) # Make a copy of `model` if sched: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(a_ , a_ , a_ , a_ ) else: lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(a_ , a_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCamelCase ( a_ ) -> Any: # Test when on a single CPU or GPU that the context manager does nothing lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = get_training_setup(a_ ) # Use a single batch lowerCAmelCase_ , lowerCAmelCase_ = next(iter(a_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a_ , a_ , a_ , a_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a_ ): step_model(a_ , a_ , a_ , a_ ) else: # Sync grads step_model(a_ , a_ , a_ , a_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(a_ , a_ , a_ , a_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowerCAmelCase_ = 
ddp_input[torch.randperm(len(a_ ) )] def lowerCamelCase ( a_ ) -> Tuple: # Test on distributed setup that context manager behaves properly lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = get_training_setup(a_ ) # Use a single batch lowerCAmelCase_ , lowerCAmelCase_ = next(iter(a_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a_ , a_ , a_ , a_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a_ ): step_model(a_ , a_ , a_ , a_ ) else: # Sync grads step_model(a_ , a_ , a_ , a_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowerCAmelCase_ = ddp_input[torch.randperm(len(a_ ) )] def lowerCamelCase ( a_=False , a_=False ) -> str: lowerCAmelCase_ = Accelerator( split_batches=a_ , dispatch_batches=a_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = get_training_setup(a_ ) for iteration, batch in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a_ , a_ , a_ , a_ , a_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(a_ ): step_model(a_ , a_ , a_ , a_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(a_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowerCAmelCase_ = ddp_input[torch.randperm(len(a_ ) )] GradientState._reset_state() def lowerCamelCase ( a_=False , a_=False ) -> Union[str, Any]: lowerCAmelCase_ = Accelerator( split_batches=a_ , dispatch_batches=a_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = get_training_setup(a_ , a_ ) for iteration, batch in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase_ , lowerCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(a_ , a_ , a_ , a_ , a_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(a_ ): step_model(a_ , a_ , a_ , a_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowerCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a_ )) if accelerator.num_processes > 1: check_model_parameters(a_ , a_ , a_ , a_ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def lowerCamelCase ( ) -> List[Any]: lowerCAmelCase_ = Accelerator() lowerCAmelCase_ = RegressionDataset(length=80 ) lowerCAmelCase_ = DataLoader(a_ , batch_size=16 ) lowerCAmelCase_ = RegressionDataset(length=96 ) lowerCAmelCase_ = DataLoader(a_ , batch_size=16 ) lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(a_ , a_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(a_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a_ ) if iteration < len(a_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(a_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a_ ) if batch_num < len(a_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCamelCase ( ) -> Dict: lowerCAmelCase_ = Accelerator() lowerCAmelCase_ = accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) test_noop_sync(a_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(a_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(a_ , a_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , 
'`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(a_ , a_ ) def lowerCamelCase ( a_ ) -> Dict: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
code_codestyle: 14
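The tests above exercise Accelerate's `no_sync`/`accumulate` wrappers; the underlying pattern in plain PyTorch is a sketch like the following (model and data are toy stand-ins):

```python
import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(6)]
accum_steps = 2  # matches gradient_accumulation_steps=2 above

for step, (x, y) in enumerate(data):
    loss = F.mse_loss(model(x), y) / accum_steps  # scale so summed grads average out
    loss.backward()                               # grads accumulate in .grad
    if (step + 1) % accum_steps == 0:
        opt.step()
        opt.zero_grad()
```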
from maths.prime_factors import prime_factors


def lowerCamelCase ( a_ ) -> int:
    if not isinstance(a_ , a_ ):
        lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(a_ )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(a_ ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 14
label: 1
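The row above implements the Liouville function λ(n) = (-1)^Ω(n), where Ω counts prime factors with multiplicity. A self-contained check; the trial-division helper below stands in for the repo's `maths.prime_factors`:

```python
def prime_factors(n):
    # Trial division; stands in for maths.prime_factors above.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def liouville_lambda(n):
    return -1 if len(prime_factors(n)) % 2 else 1

assert [liouville_lambda(n) for n in range(1, 11)] == [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]
```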
from __future__ import annotations

lowerCamelCase_ = 1_0


def lowerCamelCase ( a_ ) -> list[int]:
    lowerCAmelCase_ = 1
    lowerCAmelCase_ = max(a_ )
    while placement <= max_digit:
        # declare and initialize empty buckets
        lowerCAmelCase_ = [[] for _ in range(a_ )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            lowerCAmelCase_ = int((i / placement) % RADIX )
            buckets[tmp].append(a_ )
        # put each buckets' contents into list_of_ints
        lowerCAmelCase_ = 0
        for b in range(a_ ):
            for i in buckets[b]:
                lowerCAmelCase_ = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 14
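A quick sanity check, assuming the sort above is in scope under the deobfuscated name `radix_sort` (the dataset stores it under an anonymized identifier). Note it handles non-negative integers only:

```python
nums = [170, 45, 75, 90, 802, 24, 2, 66]
assert radix_sort(nums) == [2, 24, 45, 66, 75, 90, 170, 802]
```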
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): 
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' ) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' , a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 14
label: 1
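A standalone sketch of the `attn.in_proj` split performed in the conversion above: CLIP-style checkpoints store Q, K, V fused along dim 0, and the converter slices them into thirds (sizes here are illustrative):

```python
import torch

dim = 8                                      # hidden size (illustrative)
in_proj_weight = torch.randn(3 * dim, dim)   # fused QKV from the checkpoint
in_proj_bias = torch.randn(3 * dim)

q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
```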
lowerCamelCase_ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

lowerCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
code_codestyle: 14
def lowerCamelCase ( a_ , a_ ) -> List[Any]:
    lowerCAmelCase_ = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def lowerCamelCase ( a_ , a_ , a_ ) -> Union[str, Any]:
    lowerCAmelCase_ = 0
    while b > 0:
        if b & 1:
            lowerCAmelCase_ = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
style_context_codestyle: 14
label: 1
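The second cell in this row is shift-and-add ("Russian peasant") multiplication: for 13 × 11, 11 = 0b1011, so the result accumulates 13 + 26 + 104 = 143 over the set bits. A quick check, assuming the two functions above are in scope under the deobfuscated names `multiply` and `multiply_mod`:

```python
assert multiply(13, 11) == 143              # 13 + 26 + 104, one term per set bit of 11
assert multiply_mod(13, 11, 7) == 143 % 7   # same walk, reduced mod 7 at each step
```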
from copy import deepcopy


class a_ :
    '''simple docstring'''

    def __init__( self , lowercase_ = None , lowercase_ = None ) -> None:
        '''simple docstring'''
        if arr is None and size is not None:
            lowerCAmelCase_ = size
            lowerCAmelCase_ = [0] * size
        elif arr is not None:
            self.init(lowercase_ )
        else:
            raise ValueError('Either arr or size must be specified' )

    def _lowercase ( self , lowercase_ ) -> None:
        '''simple docstring'''
        lowerCAmelCase_ = len(lowercase_ )
        lowerCAmelCase_ = deepcopy(lowercase_ )
        for i in range(1 , self.size ):
            lowerCAmelCase_ = self.next_(lowercase_ )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def _lowercase ( self ) -> list[int]:
        '''simple docstring'''
        lowerCAmelCase_ = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            lowerCAmelCase_ = self.next_(lowercase_ )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def _lowercase ( lowercase_ ) -> int:
        '''simple docstring'''
        return index + (index & (-index))

    @staticmethod
    def _lowercase ( lowercase_ ) -> int:
        '''simple docstring'''
        return index - (index & (-index))

    def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
        '''simple docstring'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            lowerCAmelCase_ = self.next_(lowercase_ )

    def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
        '''simple docstring'''
        self.add(lowercase_ , value - self.get(lowercase_ ) )

    def _lowercase ( self , lowercase_ ) -> int:
        '''simple docstring'''
        if right == 0:
            return 0
        lowerCAmelCase_ = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            lowerCAmelCase_ = self.prev(lowercase_ )
        return result

    def _lowercase ( self , lowercase_ , lowercase_ ) -> int:
        '''simple docstring'''
        return self.prefix(lowercase_ ) - self.prefix(lowercase_ )

    def _lowercase ( self , lowercase_ ) -> int:
        '''simple docstring'''
        return self.query(lowercase_ , index + 1 )

    def _lowercase ( self , lowercase_ ) -> int:
        '''simple docstring'''
        value -= self.tree[0]
        if value < 0:
            return -1
        lowerCAmelCase_ = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        lowerCAmelCase_ = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 14
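The `next_`/`prev` helpers in the Fenwick tree above isolate the lowest set bit (`index & -index`) to walk the tree. A minimal 1-indexed sketch of the same update and prefix-sum walks (the class above additionally keeps index 0 as a special slot):

```python
# Minimal sketch of the Fenwick update/query walks (1-indexed core).
def update(tree, i, delta):
    while i < len(tree):
        tree[i] += delta
        i += i & (-i)          # next_: add lowest set bit

def prefix(tree, i):
    s = 0
    while i > 0:
        s += tree[i]
        i -= i & (-i)          # prev: drop lowest set bit
    return s

tree = [0] * 9                 # supports indices 1..8
for idx, val in enumerate([3, 2, -1, 6, 5, 4, -3, 3], start=1):
    update(tree, idx, val)
assert prefix(tree, 4) == 10   # 3 + 2 - 1 + 6
```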
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class a_ ( a_ ): '''simple docstring''' __a: str = ['''vqvae'''] def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' lowerCAmelCase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase_ , device=self.device , ) lowerCAmelCase_ = noise lowerCAmelCase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_ , lowercase_ ) lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ ) lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample( generator=lowercase_ )[0] lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ = int(mask_start_secs * pixels_per_second ) lowerCAmelCase_ = int(mask_end_secs * pixels_per_second ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase_ ): lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample'] else: lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] if isinstance(self.scheduler , lowercase_ ): lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample'] else: lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , 
)['prev_sample'] if mask is not None: if mask_start > 0: lowerCAmelCase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample'] lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' ) lowerCAmelCase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) ) lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ = 1 - alpha_prod_t lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor: '''simple docstring''' lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
style_context_codestyle: 14
label: 1
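The pipeline's static method at the end of this row is spherical linear interpolation (slerp) between two noise tensors; a NumPy sketch of the same formula:

```python
import numpy as np

def slerp(x0, x1, alpha):
    # Interpolate along the great circle between x0 and x1; alpha in [0, 1].
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel())
                      / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

x0, x1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp(x0, x1, 0.5)
assert np.allclose(np.linalg.norm(mid), 1.0)   # stays on the unit circle
```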
from ..utils import DummyObject, requires_backends class a_ ( metaclass=a_ ): '''simple docstring''' __a: Optional[Any] = ['''flax''', '''transformers'''] def __init__( self , *lowercase_ , **lowercase_ ) -> int: '''simple docstring''' requires_backends(self , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> int: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) class a_ ( metaclass=a_ ): '''simple docstring''' __a: Tuple = ['''flax''', '''transformers'''] def __init__( self , *lowercase_ , **lowercase_ ) -> int: '''simple docstring''' requires_backends(self , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) class a_ ( metaclass=a_ ): '''simple docstring''' __a: Optional[int] = ['''flax''', '''transformers'''] def __init__( self , *lowercase_ , **lowercase_ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> int: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> Dict: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) class a_ ( metaclass=a_ ): '''simple docstring''' __a: Any = ['''flax''', '''transformers'''] def __init__( self , *lowercase_ , **lowercase_ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> int: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] ) @classmethod def _lowercase ( cls , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' requires_backends(cls , ['flax', 'transformers'] )
code_codestyle: 14
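The dummy classes above all funnel through `requires_backends`; a hedged standalone analogue of that guard (not the library's actual implementation):

```python
import importlib.util

def requires_backends(obj, backends):
    # Raise early if any optional dependency is missing.
    name = obj if isinstance(obj, str) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")

class FlaxPipeline:  # illustrative name
    def __init__(self):
        requires_backends(self, ["flax", "transformers"])
```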
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]: def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ): lowerCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase_ = math.ceil(val / multiple ) * multiple return x lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = output_size # determine new height and width lowerCAmelCase_ = output_height / input_height lowerCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase_ = scale_width else: # fit height lowerCAmelCase_ = scale_height lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ ) lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ ) return (new_height, new_width) class a_ ( a_ ): '''simple docstring''' __a: Union[str, Any] = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowerCAmelCase_ = get_resize_output_image_size( lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict: '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: '''simple docstring''' lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowerCAmelCase_ = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowercase_ ): lowerCAmelCase_ = target_sizes.numpy() lowerCAmelCase_ = [] for idx in range(len(lowercase_ ) ): lowerCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ ) lowerCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowerCAmelCase_ = logits.argmax(dim=1 ) lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
style_context_codestyle: 14
label: 1
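The resize logic in this row snaps each target dimension to a multiple of `ensure_multiple_of`; extracted as a standalone helper with a worked check:

```python
import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

assert constraint_to_multiple_of(383.5, 32) == 384             # nearest multiple of 32
assert constraint_to_multiple_of(380.0, 32, max_val=383) == 352  # rounded up past max, so floor
```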
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
code_codestyle: 14
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


lowerCamelCase_ = logging.get_logger(__name__)


class a_ ( a_ ):
    '''simple docstring'''

    def __init__( self , *lowercase_ , **lowercase_ ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , lowercase_ , )
        super().__init__(*lowercase_ , **lowercase_ )
style_context_codestyle: 14
label: 1
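The second cell in this row shows the back-compat pattern used across Transformers: the old class subclasses the new one and only adds a warning. A generic sketch with illustrative names:

```python
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    # Deprecated alias: warn, then defer everything to the replacement class.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
```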
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase_ = 1_6 lowerCamelCase_ = 3_2 def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ = 16 ) -> Optional[int]: lowerCAmelCase_ = AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCAmelCase_ = DatasetDict( { 'train': dataset['train'].select(a_ ), 'validation': dataset['train'].select(a_ ), 'test': dataset['validation'], } ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ = datasets.map( a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase_ = 8 else: lowerCAmelCase_ = None return tokenizer.pad( a_ , padding='longest' , max_length=a_ , pad_to_multiple_of=a_ , return_tensors='pt' , ) # Instantiate dataloaders. 
lowerCAmelCase_ = DataLoader( tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) lowerCAmelCase_ = DataLoader( tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) lowerCAmelCase_ = DataLoader( tokenized_datasets['test'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase ( a_ , a_ ) -> Optional[int]: # New Code # lowerCAmelCase_ = [] # Download the dataset lowerCAmelCase_ = load_dataset('glue' , 'mrpc' ) # Create our splits lowerCAmelCase_ = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator lowerCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ = config['lr'] lowerCAmelCase_ = int(config['num_epochs'] ) lowerCAmelCase_ = int(config['seed'] ) lowerCAmelCase_ = int(config['batch_size'] ) lowerCAmelCase_ = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation lowerCAmelCase_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase_ = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase_ = MAX_GPU_BATCH_SIZE set_seed(a_ ) # New Code # # Create our folds: lowerCAmelCase_ = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) lowerCAmelCase_ = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = get_fold_dataloaders( a_ , a_ , a_ , a_ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ = AdamW(params=model.parameters() , lr=a_ ) # Instantiate scheduler lowerCAmelCase_ = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # Now we train the model for epoch in range(a_ ): model.train() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase_ = model(**a_ ) lowerCAmelCase_ = outputs.loss lowerCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) lowerCAmelCase_ = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=a_ , references=a_ , ) lowerCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , a_ ) # New Code # # We also run predictions on the test set at the very end lowerCAmelCase_ = [] for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) lowerCAmelCase_ = outputs.logits lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(a_ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: lowerCAmelCase_ = torch.cat(a_ , dim=0 ) lowerCAmelCase_ = torch.stack(a_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) lowerCAmelCase_ = metric.compute(predictions=a_ , references=a_ ) accelerator.print('Average test metrics from all folds:' , a_ ) def lowerCamelCase ( ) -> Optional[Any]: lowerCAmelCase_ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=a_ , default=a_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=a_ , default=3 , help='The number of splits to perform across the dataset' ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(a_ , a_ ) if __name__ == "__main__": main()
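# Self-contained sketch of the fold-ensembling step above (illustrative
# shapes only): per-fold test logits are stacked, summed over the fold
# dimension, divided by the number of folds, and argmax'd into class ids.
import torch

num_folds = 3
fold_logits = [torch.randn(4, 2) for _ in range(num_folds)]  # 4 test rows, 2 classes
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds).argmax(dim=-1)
assert ensembled.shape == (4,)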
14
from __future__ import annotations import queue class a_ : '''simple docstring''' def __init__( self , lowercase_ ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = data lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCamelCase ( ) -> TreeNode: print('\n********Press N to stop entering at any point of time********\n' ) lowerCAmelCase_ = input('Enter the value of the root node: ' ).strip().lower() lowerCAmelCase_ = queue.Queue() lowerCAmelCase_ = TreeNode(int(a_ ) ) q.put(a_ ) while not q.empty(): lowerCAmelCase_ = q.get() lowerCAmelCase_ = F'''Enter the left node of {node_found.data}: ''' lowerCAmelCase_ = input(a_ ).strip().lower() or 'n' if check == "n": return tree_node lowerCAmelCase_ = TreeNode(int(a_ ) ) lowerCAmelCase_ = left_node q.put(a_ ) lowerCAmelCase_ = F'''Enter the right node of {node_found.data}: ''' lowerCAmelCase_ = input(a_ ).strip().lower() or 'n' if check == "n": return tree_node lowerCAmelCase_ = TreeNode(int(a_ ) ) lowerCAmelCase_ = right_node q.put(a_ ) raise def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return print(node.data , end=',' ) pre_order(node.left ) pre_order(node.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return in_order(node.left ) print(node.data , end=',' ) in_order(node.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=',' ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = queue.Queue() q.put(a_ ) while not q.empty(): lowerCAmelCase_ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = queue.Queue() q.put(a_ ) while not q.empty(): lowerCAmelCase_ = [] while not q.empty(): lowerCAmelCase_ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(a_ ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = [] lowerCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',' ) stack.append(a_ ) lowerCAmelCase_ = n.left # end of while means current node doesn't have left child lowerCAmelCase_ = stack.pop() # start to traverse its right child lowerCAmelCase_ = n.right def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = [] lowerCAmelCase_ = node while n or stack: while n: stack.append(a_ ) lowerCAmelCase_ = n.left lowerCAmelCase_ = stack.pop() print(n.data , end=',' ) lowerCAmelCase_ = n.right def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ , lowerCAmelCase_ = [], [] lowerCAmelCase_ = node stacka.append(a_ ) while stacka: # to find the reversed order of post order, store it in stack2 lowerCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(a_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',' ) def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str: if not s: return "\n" + width * char lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 ) return F'''{left * char} {s} {(left + 
extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) lowerCamelCase_ = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 5_0 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
14
1
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCamelCase_ = 1_6 lowerCamelCase_ = 3_2 def lowerCamelCase ( a_ ) -> int: return int(x / 2**20 ) class a_ : '''simple docstring''' def __enter__( self ) -> Optional[int]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCAmelCase_ = torch.cuda.memory_allocated() return self def __exit__( self , *lowercase_ ) -> Union[str, Any]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() lowerCAmelCase_ = torch.cuda.memory_allocated() lowerCAmelCase_ = torch.cuda.max_memory_allocated() lowerCAmelCase_ = bamb(self.end - self.begin ) lowerCAmelCase_ = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCamelCase ( a_ , a_ = 16 , a_ = "bert-base-cased" , a_ = 320 , a_ = 160 , ) -> Optional[int]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(a_ ) lowerCAmelCase_ = load_dataset( 'glue' , 'mrpc' , split={'train': F'''train[:{n_train}]''', 'validation': F'''validation[:{n_val}]'''} ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ = datasets.map( a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=a_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a_ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(a_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowerCAmelCase_ = DataLoader( tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) lowerCAmelCase_ = DataLoader( tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]: # Initialize accelerator lowerCAmelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ = config['lr'] lowerCAmelCase_ = int(config['num_epochs'] ) lowerCAmelCase_ = int(config['seed'] ) lowerCAmelCase_ = int(config['batch_size'] ) lowerCAmelCase_ = args.model_name_or_path set_seed(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(a_ , a_ , a_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ ) # Instantiate optimizer lowerCAmelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=a_ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowerCAmelCase_ = 1 lowerCAmelCase_ = (len(a_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , ) else: lowerCAmelCase_ = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ = 0 # Now we train the model lowerCAmelCase_ = {} for epoch in range(a_ , a_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(a_ ): lowerCAmelCase_ = model(**a_ ) lowerCAmelCase_ = outputs.loss lowerCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCAmelCase_ = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(a_ , a_ ) def lowerCamelCase ( ) -> Any: lowerCAmelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=a_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=a_ , ) parser.add_argument( '--output_dir' , type=a_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=a_ , default=a_ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=a_ , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=a_ , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=a_ , default=1 , help='Number of train epochs.' , ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(a_ , a_ ) if __name__ == "__main__": main()
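# Minimal sketch of the TorchTracemalloc pattern defined above (assumes a
# CUDA device; torch.cuda.reset_peak_memory_stats is the current spelling of
# the deprecated reset_max_memory_allocated call used in the original):
import gc

import torch


class MemDelta:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        # deltas in MB, mirroring the bytes / 2**20 conversion above
        self.used = int((torch.cuda.memory_allocated() - self.begin) / 2**20)
        self.peaked = int((torch.cuda.max_memory_allocated() - self.begin) / 2**20)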
14
import baseaa


def baseaa_encode ( a_ ) -> bytes:
    return baseaa.baaencode(a_.encode('utf-8' ) )


def baseaa_decode ( a_ ) -> str:
    return baseaa.baadecode(a_ ).decode('utf-8' )


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
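# Read with the digit-mangling undone ('baseaa' -> base64, 'baaencode' ->
# b85encode -- an inference from the names, not stated in the source), the
# module above is this runnable standard-library round-trip:
import base64

encoded = base64.b85encode('Hello World!'.encode('utf-8'))
print(encoded)
print(base64.b85decode(encoded).decode('utf-8'))  # -> Hello World!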
14
1
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } lowerCamelCase_ = { """Salesforce/codegen-350M-mono""": 2_0_4_8, } class a_ ( a_ ): '''simple docstring''' __a: str = VOCAB_FILES_NAMES __a: Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a: Tuple = ['''input_ids''', '''attention_mask'''] __a: int = CodeGenTokenizer def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , **lowercase_ , ) -> int: '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) if kwargs.pop('add_bos_token' , lowercase_ ): lowerCAmelCase_ = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' f'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' f'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) lowerCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space: lowerCAmelCase_ = getattr(lowercase_ , pre_tok_state.pop('type' ) ) lowerCAmelCase_ = add_prefix_space lowerCAmelCase_ = pre_tok_class(**lowercase_ ) lowerCAmelCase_ = add_prefix_space def _lowercase ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding: '''simple docstring''' lowerCAmelCase_ = kwargs.get('is_split_into_words' , lowercase_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_ ) def _lowercase ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding: '''simple docstring''' lowerCAmelCase_ = kwargs.get('is_split_into_words' , lowercase_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]: '''simple docstring''' lowerCAmelCase_ = self._tokenizer.model.save(lowercase_ , name=lowercase_ ) return tuple(lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = False , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> str: '''simple docstring''' lowerCAmelCase_ = super().decode( token_ids=lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , **lowercase_ , ) if truncate_before_pattern is not None and len(lowercase_ ) > 0: lowerCAmelCase_ = self.truncate(lowercase_ , lowercase_ ) return decoded_text def _lowercase ( self , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' def find_re(lowercase_ , lowercase_ , lowercase_ ): lowerCAmelCase_ = pattern.search(lowercase_ , lowercase_ ) return m.start() if m else -1 lowerCAmelCase_ = [re.compile(lowercase_ , re.MULTILINE ) for pattern in truncate_before_pattern] lowerCAmelCase_ = list(re.finditer('^print' , lowercase_ , re.MULTILINE ) ) if len(lowercase_ ) > 1: lowerCAmelCase_ = completion[: prints[1].start()] lowerCAmelCase_ = list(re.finditer('^def' , lowercase_ , re.MULTILINE ) ) if len(lowercase_ ) > 1: lowerCAmelCase_ = completion[: defs[1].start()] lowerCAmelCase_ = 0 lowerCAmelCase_ = [ pos for pos in [find_re(lowercase_ , lowercase_ , lowercase_ ) for terminal in terminals] if pos != -1 ] if len(lowercase_ ) > 0: return completion[: min(lowercase_ )] else: return completion
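# Usage sketch for the decode(..., truncate_before_pattern=...) hook above
# (checkpoint name taken from the vocab map earlier; the ids here are just an
# encoded string standing in for model.generate output). A second top-level
# `print` starts a new completion block, so everything from it onward is cut.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
ids = tok("print('a')\nprint('b')").input_ids
print(tok.decode(ids, truncate_before_pattern=['\n\n\n']))  # -> print('a')\n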
14
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int: if attention_mask is None: lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class a_ : '''simple docstring''' __a: Tuple = OPTConfig __a: Optional[Any] = {} __a: Tuple = '''gelu''' def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any: '''simple docstring''' lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = embed_dim lowerCAmelCase_ = word_embed_proj_dim lowerCAmelCase_ = False def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , ) lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ ) return config, inputs_dict def _lowercase ( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = TFOPTModel(config=lowercase_ ) lowerCAmelCase_ = inputs_dict['input_ids'] lowerCAmelCase_ = input_ids[:1, :] lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :] lowerCAmelCase_ = 1 # first forward pass lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase_ = 
tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0] lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 ) @require_tf class a_ ( a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else () __a: Union[str, Any] = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) __a: int = False __a: List[Any] = False __a: Dict = False __a: List[Any] = 1_0 def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = TFOPTModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase_ , lowercase_ ): if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings lowerCAmelCase_ = model_class(config=lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
lowerCAmelCase_ = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase_ ) # check that weights remain the same after resizing lowerCAmelCase_ = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase_ ) lowerCAmelCase_ = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) def lowerCamelCase ( a_ ) -> Any: return tf.constant(a_ , dtype=tf.intaa ) @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' __a: Optional[int] = 9_9 def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) lowerCAmelCase_ = input_ids.shape[0] lowerCAmelCase_ = OPTConfig( vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' ) lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id ) with tf.GradientTape(): lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state lowerCAmelCase_ = (1, 1_1, 5_1_2) self.assertEqual(output.shape , lowercase_ ) lowerCAmelCase_ = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowerCAmelCase_ = 'facebook/opt-350m' def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model ) lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model ) lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowerCAmelCase_ = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], 
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-125m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) lowerCAmelCase_ = 'left' # use different length sentences to test batching lowerCAmelCase_ = [ 'Hello, my dog is a little', 'Today, I', ] lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ ) lowerCAmelCase_ = inputs['input_ids'] lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] ) lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ ) lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = 
TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ )
14
1
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCamelCase ( a_ ) -> float: return np.dot(a_ , a_ ) class a_ : '''simple docstring''' def __init__( self , *, lowercase_ = np.inf , lowercase_ = "linear" , lowercase_ = 0.0 , ) -> None: '''simple docstring''' lowerCAmelCase_ = regularization lowerCAmelCase_ = gamma if kernel == "linear": lowerCAmelCase_ = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('gamma must be float or int' ) if not self.gamma > 0: raise ValueError('gamma must be > 0' ) lowerCAmelCase_ = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCAmelCase_ = f'''Unknown kernel: {kernel}''' raise ValueError(lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ ) -> float: '''simple docstring''' return np.dot(lowercase_ , lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ ) -> float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def _lowercase ( self , lowercase_ , lowercase_ ) -> None: '''simple docstring''' lowerCAmelCase_ = observations lowerCAmelCase_ = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowerCAmelCase_) , ) = np.shape(lowercase_ ) def to_minimize(lowercase_ ) -> float: lowerCAmelCase_ = 0 ((lowerCAmelCase_) , ) = np.shape(lowercase_ ) for i in range(lowercase_ ): for j in range(lowercase_ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(lowercase_ ) lowerCAmelCase_ = LinearConstraint(lowercase_ , 0 , 0 ) lowerCAmelCase_ = Bounds(0 , self.regularization ) lowerCAmelCase_ = minimize( lowercase_ , np.ones(lowercase_ ) , bounds=lowercase_ , constraints=[ly_contraint] ).x lowerCAmelCase_ = l_star # calculating mean offset of separation plane to points lowerCAmelCase_ = 0 for i in range(lowercase_ ): for j in range(lowercase_ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) lowerCAmelCase_ = s / n def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' lowerCAmelCase_ = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowercase_ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
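# Minimal self-contained check of the RBF kernel defined above:
# K(x, x) == 1, and the value decays with the squared distance scaled by gamma.
import numpy as np


def rbf(vectora, vectorb, gamma=0.5):
    diff = vectora - vectorb
    return np.exp(-(gamma * np.dot(diff, diff)))


x, y = np.array([1.0, 2.0]), np.array([1.0, 3.0])
assert rbf(x, x) == 1.0
assert np.isclose(rbf(x, y), np.exp(-0.5))  # squared distance is 1.0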
14
MOD_ADLER = 6_5_5_2_1


def lowerCamelCase ( a_ ) -> int:
    a = 1
    b = 0
    for plain_chr in a_:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
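# Quick sanity check: the function above implements Adler-32, so it must
# agree with zlib's reference implementation; 300286872 (0x11E60398) is the
# well-known checksum of "Wikipedia".
import zlib

assert lowerCamelCase('Wikipedia' ) == zlib.adler32(b'Wikipedia' ) == 300286872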
14
1
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowerCamelCase_ = 5_0_0_0_0_0 lowerCamelCase_ , lowerCamelCase_ = os.path.split(__file__) lowerCamelCase_ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def lowerCamelCase ( a_ , **a_ ) -> Union[str, Any]: lowerCAmelCase_ = dataset.map(**a_ ) @get_duration def lowerCamelCase ( a_ , **a_ ) -> List[str]: lowerCAmelCase_ = dataset.filter(**a_ ) def lowerCamelCase ( ) -> Any: lowerCAmelCase_ = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) lowerCAmelCase_ = generate_example_dataset( os.path.join(a_ , 'dataset.arrow' ) , a_ , num_examples=a_ ) lowerCAmelCase_ = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=a_ ) def tokenize(a_ ): return tokenizer(examples['text'] ) lowerCAmelCase_ = map(a_ ) lowerCAmelCase_ = map(a_ , batched=a_ ) lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ ) with dataset.formatted_as(type='numpy' ): lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ ) with dataset.formatted_as(type='pandas' ): lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ ) with dataset.formatted_as(type='torch' , columns='numbers' ): lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ ) lowerCAmelCase_ = map(a_ , function=a_ , batched=a_ ) lowerCAmelCase_ = filter(a_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(a_ , 'wb' ) as f: f.write(json.dumps(a_ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
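# Plausible shape of the `get_duration` decorator imported from utils above
# (an assumption -- the real helper lives in the benchmarks' utils module):
# run the wrapped call and return the elapsed wall-clock seconds.
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper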
14
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_=False ) -> Tuple: lowerCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): lowerCAmelCase_ = 'segformer.encoder.' + key if key.startswith('backbone' ): lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' ) if "norm" in key: lowerCAmelCase_ = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' ) if "layer_norm1" in key: lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ = key[key.find('block' ) + len('block' )] lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' ) if "attn.q" in key: lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase_ = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase_ = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase_ = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' ) if key.startswith('head' ): lowerCAmelCase_ = key.replace('head' , 'classifier' ) lowerCAmelCase_ = value return new_state_dict def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = 
'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ) return image @torch.no_grad() def lowerCamelCase ( a_ , a_ , a_ ) -> int: lowerCAmelCase_ = SegformerConfig() lowerCAmelCase_ = False # set attributes based on model_name lowerCAmelCase_ = 'huggingface/label-files' if "segformer" in model_name: lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: lowerCAmelCase_ = 150 lowerCAmelCase_ = 'ade20k-id2label.json' lowerCAmelCase_ = (1, 150, 128, 128) elif "city" in model_name: lowerCAmelCase_ = 19 lowerCAmelCase_ = 'cityscapes-id2label.json' lowerCAmelCase_ = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: lowerCAmelCase_ = True lowerCAmelCase_ = model_name[4:6] lowerCAmelCase_ = 1_000 lowerCAmelCase_ = 'imagenet-1k-id2label.json' lowerCAmelCase_ = (1, 1_000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 256 elif size == "b2": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 6, 3] elif size == "b3": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 18, 3] elif size == "b4": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 8, 27, 3] elif size == "b5": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) lowerCAmelCase_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ ) # prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) ) else: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(a_ , a_ ) # create HuggingFace model and load state dict if encoder_only: lowerCAmelCase_ = False lowerCAmelCase_ = SegformerForImageClassification(a_ ) else: lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass lowerCAmelCase_ = model(a_ ) lowerCAmelCase_ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.5_820, 
-8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCAmelCase_ = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], 
[-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: lowerCAmelCase_ = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
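# Self-contained worked example of the index-shift renaming in rename_keys
# above: checkpoint keys count blocks from 1 ("block1"), HF names count
# from 0 ("block.0").
key = 'backbone.block1.attn.q.weight'
key = key.replace('backbone', 'segformer.encoder')
idx = key[key.find('block') + len('block')]
key = key.replace(f'block{idx}', f'block.{int(idx) - 1}')
key = key.replace('attn.q', 'attention.self.query')
assert key == 'segformer.encoder.block.0.attention.self.query.weight'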
14
1
from math import ceil def lowerCamelCase ( a_ , a_ ) -> str: lowerCAmelCase_ = list(range(0 , a_ ) ) lowerCAmelCase_ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowerCAmelCase_ = [] for i in device_map_blocks: if device_map_blocks.count(a_ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(a_ ) # Missing blocks lowerCAmelCase_ = [i for i in blocks if i not in device_map_blocks] lowerCAmelCase_ = [i for i in device_map_blocks if i not in blocks] if len(a_ ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(a_ ) ) if len(a_ ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(a_ ) ) if len(a_ ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(a_ ) ) def lowerCamelCase ( a_ , a_ ) -> Optional[int]: lowerCAmelCase_ = list(range(a_ ) ) lowerCAmelCase_ = int(ceil(n_layers / len(a_ ) ) ) lowerCAmelCase_ = [layers[i : i + n_blocks] for i in range(0 , a_ , a_ )] return dict(zip(a_ , a_ ) )
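# Worked example of the even-split logic above (self-contained sketch):
# 8 layers across 2 devices -> ceil(8 / 2) = 4 layers per device.
from math import ceil

n_layers, devices = 8, [0, 1]
n_blocks = int(ceil(n_layers / len(devices)))
layers = list(range(n_layers))
blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
assert dict(zip(devices, blocks)) == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}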
14
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class a_ ( a_ , a_ ): '''simple docstring''' __a: Optional[Any] = '''nat''' __a: int = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = depths lowerCAmelCase_ = len(lowercase_ ) lowerCAmelCase_ = num_heads lowerCAmelCase_ = kernel_size lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = hidden_act lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) lowerCAmelCase_ = layer_scale_init_value lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )] lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
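# Worked example of the hidden_size formula in the config above: with the
# default embed_dim=64 and four stages (depths=[3, 4, 6, 5]), the channel
# dimension after the last stage is 64 * 2**(4 - 1) = 512.
embed_dim, depths = 6_4, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512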
14
1
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union lowerCamelCase_ = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class a_ : '''simple docstring''' __a: str __a: Optional[str] = None __a: Optional[Union[str, int]] = None __a: Optional[Union[str, int]] = None __a: Optional[Union[str, int]] = None def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> Any: '''simple docstring''' return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def _lowercase ( self ) -> int: '''simple docstring''' return self.major, self.minor, self.patch def _lowercase ( self , lowercase_ ) -> List[str]: '''simple docstring''' if isinstance(lowercase_ , lowercase_ ): return Version(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): return other raise TypeError(f'''{other} (type {type(lowercase_ )}) cannot be compared to version.''' ) def __eq__( self , lowercase_ ) -> List[Any]: '''simple docstring''' try: lowerCAmelCase_ = self._validate_operand(lowercase_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self._validate_operand(lowercase_ ) return self.tuple < other.tuple def __hash__( self ) -> Optional[int]: '''simple docstring''' return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _lowercase ( cls , lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _lowercase ( self ) -> str: '''simple docstring''' return self.version_str def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = _VERSION_REG.match(a_ ) if not res: raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(a_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def lowerCamelCase ( a_ ) -> Union[str, Any]: return ".".join(str(a_ ) for v in version_tuple )
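# Self-contained sketch of the ordering the class above implements: versions
# compare by their (major, minor, patch) integer tuples, not as strings.
def to_tuple(version_str: str) -> tuple:
    major, minor, patch = (int(p) for p in version_str.split('.'))
    return (major, minor, patch)


assert to_tuple('1.9.2') < to_tuple('1.10.0')  # tuple order is correct
assert '1.9.2' > '1.10.0'                      # string order gets it wrong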
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) lowerCamelCase_ = """pytorch_model.bin""" lowerCamelCase_ = """pytorch_model.bin.index.json""" lowerCamelCase_ = """adapter_config.json""" lowerCamelCase_ = """adapter_model.bin""" lowerCamelCase_ = """adapter_model.safetensors""" lowerCamelCase_ = """tf_model.h5""" lowerCamelCase_ = """tf_model.h5.index.json""" lowerCamelCase_ = """model.ckpt""" lowerCamelCase_ = """flax_model.msgpack""" lowerCamelCase_ = """flax_model.msgpack.index.json""" lowerCamelCase_ = """model.safetensors""" lowerCamelCase_ = """model.safetensors.index.json""" lowerCamelCase_ = """config.json""" lowerCamelCase_ = """preprocessor_config.json""" lowerCamelCase_ = FEATURE_EXTRACTOR_NAME lowerCamelCase_ = """generation_config.json""" lowerCamelCase_ = """modelcard.json""" lowerCamelCase_ = """▁""" lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility lowerCamelCase_ = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def lowerCamelCase ( a_ ) -> Dict: if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: lowerCAmelCase_ = ( 'This example requires a source install from HuggingFace Transformers (see ' '`https://huggingface.co/docs/transformers/installation#install-from-source`),' ) else: lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other ' 'versions of HuggingFace Transformers.' )
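# Hedged usage note (added): in transformers this version guard is exported as
# check_min_version; example scripts typically call it right after the imports.
# from transformers.utils import check_min_version
# check_min_version("4.28.0")  # raises ImportError if the install is too old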
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    # the trailing separator added by the loop is stripped off again
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
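# Illustration (added): the loop above appends a trailing separator, which
# strip(separator) removes again.
if __name__ == "__main__":
    assert join(",", ["a", "b", "c"]) == "a,b,c"
    assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"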
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCamelCase_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCamelCase ( a_ ) -> List[str]: if isinstance(a_ , torch.Tensor ): return image elif isinstance(a_ , PIL.Image.Image ): lowerCAmelCase_ = [image] lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image] lowerCAmelCase_ = torch.stack(a_ ) return image class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ ) lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 ) lowerCAmelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple: '''simple docstring''' if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' ) lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase_ = init_latents.shape lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print('add noise to latents at timestep' , lowercase_ ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) lowerCAmelCase_ = init_latents return latents @torch.no_grad() def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(lowercase_ ) # 2. Preprocess image lowerCAmelCase_ = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device ) lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ ) # 4. Prepare latent variables lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) lowerCAmelCase_ = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
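# A minimal usage sketch (added), kept as comments because the pipeline class
# name is obfuscated above; "DDIMNoiseComparativeAnalysisPipeline" and the
# checkpoint id are assumptions, not confirmed by the source.
# from PIL import Image
# pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# init_image = Image.open("face.png")
# # strength in (0, 1] picks how far along the noise schedule the input starts
# image, start_timestep = pipe(image=init_image, strength=0.6, num_inference_steps=50, return_dict=False)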
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
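# Side note (added): the same sequence satisfies the closed form
# C(n) = binom(2n, n) / (n + 1), which makes a handy cross-check.
if __name__ == "__main__":
    from math import comb

    assert all(
        value == comb(2 * n, n) // (n + 1)
        for n, value in enumerate(catalan_numbers(10))
    )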
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ ) -> List[List[ImageInput]]: if isinstance(a_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(a_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(a_ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class a_ ( a_ ): '''simple docstring''' __a: List[Any] = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = size if size is not None else {'shortest_edge': 2_2_4} lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowerCAmelCase_ = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} lowerCAmelCase_ = get_size_dict(lowercase_ , param_name='crop_size' ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = do_center_crop lowerCAmelCase_ = crop_size lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" in size: lowerCAmelCase_ = get_resize_output_image_size(lowercase_ , size['shortest_edge'] , default_to_square=lowercase_ ) elif "height" in size and "width" in size: lowerCAmelCase_ = (size['height'], size['width']) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Optional[int]: '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. lowerCAmelCase_ = to_numpy_array(lowercase_ ) if do_resize: lowerCAmelCase_ = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) if do_center_crop: lowerCAmelCase_ = self.center_crop(lowercase_ , size=lowercase_ ) if do_rescale: lowerCAmelCase_ = self.rescale(image=lowercase_ , scale=lowercase_ ) if do_normalize: lowerCAmelCase_ = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) lowerCAmelCase_ = to_channel_dimension_format(lowercase_ , lowercase_ ) return image def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: '''simple docstring''' lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowerCAmelCase_ = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ = get_size_dict(lowercase_ , param_name='crop_size' ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' 
) lowerCAmelCase_ = make_batched(lowercase_ ) lowerCAmelCase_ = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] lowerCAmelCase_ = {'pixel_values': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
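# A hedged usage sketch (added): the class name is obfuscated above;
# VideoMAEImageProcessor in transformers has this exact API shape, so it is
# used here as an assumed stand-in.
# import numpy as np
# from transformers import VideoMAEImageProcessor
# processor = VideoMAEImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor(video, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224) with the defaults above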
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = {}, {} if padding is not None: lowerCAmelCase_ = padding if truncation is not None: lowerCAmelCase_ = truncation if top_k is not None: lowerCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int: '''simple docstring''' if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): lowerCAmelCase_ = {'image': image, 'question': question} else: lowerCAmelCase_ = image lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ ) return results def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = load_image(inputs['image'] ) lowerCAmelCase_ = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def _lowercase ( self , lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.model(**lowercase_ ) return model_outputs def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any: '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.sigmoid()[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
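# Hedged usage note (added): this class backs the "visual-question-answering"
# task string, so the usual entry point is transformers.pipeline.
# from transformers import pipeline
# vqa = pipeline("visual-question-answering")
# vqa(image="cats.png", question="How many cats are there?", top_k=2)
# # -> [{"score": ..., "answer": ...}, ...] sorted by score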
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ = 1_3 , lowercase_ = 6_4 , lowercase_ = 2 , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = True , lowercase_ = True , lowercase_ = 1_2_8 , lowercase_=[1_6, 3_2, 6_4, 1_2_8] , lowercase_ = 7 , lowercase_ = 4 , lowercase_ = 3_7 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 1_0 , lowercase_ = 0.02 , lowercase_ = 2 , lowercase_ = 1 , lowercase_ = 1_2_8 , lowercase_ = [2, 2, 2, 2] , lowercase_ = 2 , lowercase_ = 2 , ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = encoder_stride lowerCAmelCase_ = num_attention_outputs lowerCAmelCase_ = embed_dim lowerCAmelCase_ = embed_dim + 1 lowerCAmelCase_ = resolution lowerCAmelCase_ = depths lowerCAmelCase_ = hidden_sizes lowerCAmelCase_ = dim lowerCAmelCase_ = mlp_expansion_ratio def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = self.get_config() return config, pixel_values, labels def _lowercase ( self ) -> str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = TFEfficientFormerModel(config=lowercase_ ) lowerCAmelCase_ = 
model(lowercase_ , training=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = self.type_sequence_label_size lowerCAmelCase_ = TFEfficientFormerForImageClassification(lowercase_ ) lowerCAmelCase_ = model(lowercase_ , labels=lowercase_ , training=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase_ = 1 lowerCAmelCase_ = TFEfficientFormerForImageClassification(lowercase_ ) lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase_ = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs lowerCAmelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: Any = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __a: int = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __a: Dict = False __a: Union[str, Any] = False __a: Dict = False __a: List[Any] = False __a: List[str] = False def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = TFEfficientFormerModelTester(self ) lowerCAmelCase_ = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7 ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def _lowercase ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' pass def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(lowercase_ ) lowerCAmelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ): lowerCAmelCase_ = model_class(lowercase_ ) lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ ) lowerCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase_ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowercase_ ) , lowercase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): lowerCAmelCase_ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and 
self.model_tester.chunk_length > 1: lowerCAmelCase_ = seq_length * self.model_tester.chunk_length else: lowerCAmelCase_ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: lowerCAmelCase_ = outputs.decoder_hidden_states self.assertIsInstance(lowercase_ , (list, tuple) ) self.assertEqual(len(lowercase_ ) , lowercase_ ) lowerCAmelCase_ = getattr(self.model_tester , 'seq_length' , lowercase_ ) lowerCAmelCase_ = getattr(self.model_tester , 'decoder_seq_length' , lowercase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = TFEfficientFormerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = True lowerCAmelCase_ = getattr(self.model_tester , 'seq_length' , lowercase_ ) lowerCAmelCase_ = getattr(self.model_tester , 'encoder_seq_length' , lowercase_ ) lowerCAmelCase_ = getattr(self.model_tester , 'key_length' , lowercase_ ) lowerCAmelCase_ = getattr(self.model_tester , 'chunk_length' , lowercase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): lowerCAmelCase_ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = model_class(lowercase_ ) lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ ) lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(lowercase_ ) lowerCAmelCase_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ ) lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowerCAmelCase_ = model_class(lowercase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowerCAmelCase_ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowerCAmelCase_ = model(lowercase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase ( ) -> int: lowerCAmelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=lowercase_ , return_tensors='tf' ) # forward pass lowerCAmelCase_ = model(**lowercase_ , training=lowercase_ ) # verify the logits lowerCAmelCase_ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowerCAmelCase_ = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) ) @slow def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=lowercase_ , return_tensors='tf' ) # forward pass lowerCAmelCase_ = model(**lowercase_ , training=lowercase_ ) # verify the logits lowerCAmelCase_ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowerCAmelCase_ = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
def check_cycle(graph: dict) -> bool:
    # keep track of all the visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # mark the current node as visited and push it onto the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
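if __name__ == "__main__":
    # Small illustration (added): graphs given as adjacency lists, one acyclic
    # and one containing the back edge 2 -> 0.
    assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True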
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: int = StableDiffusionInpaintPipeline __a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a: int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a: List[str] = frozenset([] ) def _lowercase ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) lowerCAmelCase_ = CLIPTextModel(lowercase_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int: '''simple docstring''' lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) ) lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) ) if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ ) lowerCAmelCase_ = 
sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = sd_pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' ) lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ) lowerCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a_ ( a_ , unittest.TestCase ): '''simple docstring''' __a: Tuple = DiTPipeline __a: str = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __a: Optional[Any] = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } __a: Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __a: Optional[int] = False def _lowercase ( self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = TransformeraDModel( sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowercase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowercase_ , ) lowerCAmelCase_ = AutoencoderKL() lowerCAmelCase_ = DDIMScheduler() lowerCAmelCase_ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> Optional[int]: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 1_6, 1_6, 3) ) lowerCAmelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1e-3 ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=lowercase_ , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) lowerCAmelCase_ = ['vase', 'umbrella', 'white shark', 'white wolf'] lowerCAmelCase_ = 
pipe.get_label_ids(lowercase_ ) lowerCAmelCase_ = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=4_0 , output_type='np' ).images for word, image in zip(lowercase_ , lowercase_ ): lowerCAmelCase_ = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) lowerCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) lowerCAmelCase_ = ['vase', 'umbrella'] lowerCAmelCase_ = pipe.get_label_ids(lowercase_ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=2_5 , output_type='np' ).images for word, image in zip(lowercase_ , lowercase_ ): lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 edges
                # to the back, which keeps the deque sorted by distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
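if __name__ == "__main__":
    # Worked example (added): with 0/1 edge weights, the deque-based BFS above
    # finds shortest paths in O(V + E) without a priority queue.
    graph = AdjacencyList(4)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 2, 0)
    assert graph.get_shortest_path(0, 2) == 1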
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = inspect.getfile(accelerate.test_utils ) lowerCAmelCase_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCAmelCase_ = test_metrics @require_cpu def _lowercase ( self ) -> Any: '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def _lowercase ( self ) -> List[Any]: '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' self.test_metrics.main() @require_multi_gpu def _lowercase ( self ) -> Tuple: '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) lowerCAmelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() )
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
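if __name__ == "__main__":
    # Quick demonstration (added). The sort is in-place and assumes
    # non-negative integers, since digits are taken in base 10.
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]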
import os

import pytest

from transformers.dynamic_module_utils import get_imports

TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]: # load base model lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowerCAmelCase_ = load_file(a_ ) lowerCAmelCase_ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.text_encoder else: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.unet # find the target layer lowerCAmelCase_ = layer_infos.pop(0 ) while len(a_ ) > -1: try: lowerCAmelCase_ = curr_layer.__getattr__(a_ ) if len(a_ ) > 0: lowerCAmelCase_ = layer_infos.pop(0 ) elif len(a_ ) == 0: break except Exception: if len(a_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowerCAmelCase_ = layer_infos.pop(0 ) lowerCAmelCase_ = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(a_ ) else: pair_keys.append(a_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 ) else: lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ) # update visited list for item in pair_keys: visited.append(a_ ) return pipeline if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = args.base_model_path lowerCamelCase_ = args.checkpoint_path lowerCamelCase_ = args.dump_path lowerCamelCase_ = args.lora_prefix_unet lowerCamelCase_ = args.lora_prefix_text_encoder lowerCamelCase_ = args.alpha lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCamelCase_ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
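The heart of the script above is the LoRA merge rule W <- W + alpha * (up @ down), applied in place to every matched layer (4-D conv weights get an extra squeeze/unsqueeze around the same product). A toy sketch of that update on a single 2-D weight; the names, shapes, and alpha value are illustrative.

import torch

hidden_size, rank = 8, 2
weight = torch.zeros(hidden_size, hidden_size)  # stands in for curr_layer.weight.data
lora_down = torch.randn(rank, hidden_size)      # low-rank "down" factor
lora_up = torch.randn(hidden_size, rank)        # low-rank "up" factor
alpha = 0.75

# the same in-place update the script performs for 2-D weights
weight += alpha * torch.mm(lora_up, lora_down)
print(weight.shape)  # torch.Size([8, 8])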
14
1
def lowerCamelCase ( a_ ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    lowerCAmelCase_ = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    lowerCAmelCase_ = 1
    if upper_limit > 0:
        lowerCAmelCase_ = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
    for i in range(2 , upper_limit + 1 ):
        for j in range(a_ ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            lowerCamelCase_ = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")

    import doctest

    doctest.testmod()
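For reference, a self-contained version of the same O(n^2) dynamic program, checked against the closed form C(n) = C(2n, n) / (n + 1); the names here are illustrative.

from math import comb


def catalan_dp(upper_limit: int) -> list[int]:
    catalan = [0] * (upper_limit + 1)
    catalan[0] = 1  # base case C(0) = 1
    for i in range(1, upper_limit + 1):
        # C(i) = sum_{j=0}^{i-1} C(j) * C(i-j-1)
        catalan[i] = sum(catalan[j] * catalan[i - j - 1] for j in range(i))
    return catalan


print(catalan_dp(6))                                  # [1, 1, 2, 5, 14, 42, 132]
print([comb(2 * n, n) // (n + 1) for n in range(7)])  # same values, closed form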
14
14
1
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowerCamelCase_ = 2_5_6_0_4_7 lowerCamelCase_ = 2_5_6_1_4_5 @require_sentencepiece @require_tokenizers class a_ ( a_ , unittest.TestCase ): '''simple docstring''' __a: int = NllbTokenizer __a: Union[str, Any] = NllbTokenizerFast __a: Dict = True __a: int = True __a: Tuple = {} def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = NllbTokenizer(lowercase_ , keep_accents=lowercase_ ) lowerCAmelCase_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ ) lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) lowerCAmelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ ) 
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_ , lowercase_ ) # Checks everything loads correctly in the same way lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ ) lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ ) lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ ) lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_ , lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch def _lowercase ( self ) -> Optional[int]: '''simple docstring''' if not self.test_seqaseq: return lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
lowerCAmelCase_ = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] lowerCAmelCase_ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , tgt_texts=lowercase_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 1_0 ) # max_target_length will default to max_length if not specified lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch( lowercase_ , tgt_texts=lowercase_ , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch( src_texts=lowercase_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , lowercase_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' ) def _lowercase ( self ) -> Tuple: '''simple docstring''' pass def _lowercase ( self ) -> Tuple: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase_ = [AddedToken('<special>' , lstrip=lowercase_ )] lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowerCAmelCase_ = tokenizer_r.encode('Hey this is a <special> token' ) lowerCAmelCase_ = tokenizer_r.encode('<special>' , add_special_tokens=lowercase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained( lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ ) lowerCAmelCase_ = tokenizer_p.encode('Hey this is a <special> token' ) lowerCAmelCase_ = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class a_ ( unittest.TestCase ): '''simple docstring''' __a: List[Any] = '''facebook/nllb-200-distilled-600M''' __a: Tuple = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] __a: Dict = [ '''Şeful ONU declară că 
nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] __a: Any = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def _lowercase ( cls ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) lowerCAmelCase_ = 1 return cls def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 2_5_6_0_5_7 ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) def _lowercase ( self ) -> str: '''simple docstring''' self.assertIn(lowercase_ , self.tokenizer.all_special_ids ) # fmt: off lowerCAmelCase_ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on lowerCAmelCase_ = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertNotIn(self.tokenizer.eos_token , lowercase_ ) def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = ['this is gunna be a long sentence ' * 2_0] assert isinstance(src_text[0] , lowercase_ ) lowerCAmelCase_ = 1_0 lowerCAmelCase_ = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowercase_ ) self.assertEqual(len(lowercase_ ) , lowercase_ ) def _lowercase ( self ) -> Dict: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_6_2_0_3, 3] ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) lowerCAmelCase_ = NllbTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ ) @require_torch def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) lowerCAmelCase_ = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 1_5) , batch.input_ids.shape ) self.assertEqual((2, 1_5) , batch.attention_mask.shape ) lowerCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase_ ) self.assertEqual(lowercase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset 
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' ) lowerCAmelCase_ = self.tokenizer( text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=1_0 , return_tensors='pt' ) lowerCAmelCase_ = targets['input_ids'] lowerCAmelCase_ = shift_tokens_right( lowercase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(lowercase_ ) , { # A, test, EOS, en_XX 'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 2_5_6_0_5_7, } , ) @require_torch def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) lowerCAmelCase_ = False lowerCAmelCase_ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
14
from maths.prime_factors import prime_factors def lowerCamelCase ( a_ ) -> int: if not isinstance(a_ , a_ ): lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer''' raise TypeError(a_ ) if number < 1: raise ValueError('Input must be a positive integer' ) return -1 if len(prime_factors(a_ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
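The function above is the Liouville function lambda(n): +1 when n has an even number of prime factors counted with multiplicity, -1 otherwise. A self-contained sketch that inlines trial-division factor counting instead of importing maths.prime_factors; the names are illustrative.

def liouville_lambda(number: int) -> int:
    if number < 1:
        raise ValueError("Input must be a positive integer")
    factor_count, divisor = 0, 2
    while divisor * divisor <= number:
        while number % divisor == 0:  # count prime factors with multiplicity
            number //= divisor
            factor_count += 1
        divisor += 1
    if number > 1:  # whatever remains is one final prime factor
        factor_count += 1
    return -1 if factor_count % 2 else 1


print([liouville_lambda(n) for n in range(1, 11)])  # [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]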
14
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor lowerCamelCase_ = logging.get_logger(__name__) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> None: '''simple docstring''' warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
14
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): 
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' ) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' , a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
1
from pathlib import Path import numpy as np from PIL import Image def lowerCamelCase ( a_ ) -> np.ndarray: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b def lowerCamelCase ( a_ ) -> np.ndarray: return (gray > 127) & (gray <= 255) def lowerCamelCase ( a_ , a_ ) -> np.ndarray: lowerCAmelCase_ = np.zeros_like(a_ ) lowerCAmelCase_ = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowerCAmelCase_ = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowerCAmelCase_ = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowerCAmelCase_ = int(summation > 0 ) return output if __name__ == "__main__": # read original image lowerCamelCase_ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" lowerCamelCase_ = np.array(Image.open(lena_path)) # kernel to be applied lowerCamelCase_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) lowerCamelCase_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image lowerCamelCase_ = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
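A tiny worked example of binary dilation in the same spirit as the function above; because the padding offsets in that file were lost to the variable renaming, this sketch assumes centered zero-padding, and the arrays are illustrative.

import numpy as np


def dilate(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    kh, kw = kernel.shape
    padded = np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh // 2 : kh // 2 + image.shape[0], kw // 2 : kw // 2 + image.shape[1]] = image
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            # a pixel turns on if the kernel overlaps any foreground pixel
            window = padded[y : y + kh, x : x + kw]
            out[y, x] = int((kernel * window).sum() > 0)
    return out


image = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilate(image, cross))  # the single pixel grows into a cross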
14
def lowerCamelCase ( a_ , a_ ) -> List[Any]: lowerCAmelCase_ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( a_ , a_ , a_ ) -> Union[str, Any]: lowerCAmelCase_ = 0 while b > 0: if b & 1: lowerCAmelCase_ = ((res % c) + (a % c)) % c a += a b >>= 1 return res
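Both functions above use binary (Russian peasant) multiplication: scan the bits of b, doubling a at each step and adding it to the result whenever the current bit is set, so the loop runs O(log b) times; the modular variant keeps every intermediate reduced. A self-contained sketch with descriptive, illustrative names.

def binary_multiply(a: int, b: int) -> int:
    result = 0
    while b > 0:
        if b & 1:      # lowest bit set: add the current doubled value of a
            result += a
        a += a         # double a for the next bit
        b >>= 1        # move to the next bit of b
    return result


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    result = 0
    while b > 0:
        if b & 1:
            result = (result + a) % modulus
        a = (a + a) % modulus
        b >>= 1
    return result


print(binary_multiply(13, 11))         # 143
print(binary_mod_multiply(13, 11, 7))  # 3, i.e. 143 % 7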
14
1
from random import shuffle

import tensorflow as tf
from numpy import array


def lowerCamelCase ( a_ , a_ ) -> int:
    lowerCAmelCase_ = int(a_ )
    assert noofclusters < len(a_ )
    # Find out the dimensionality
    lowerCAmelCase_ = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    lowerCAmelCase_ = list(range(len(a_ ) ) )
    shuffle(a_ )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    lowerCAmelCase_ = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        lowerCAmelCase_ = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        lowerCAmelCase_ = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(a_ )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        lowerCAmelCase_ = tf.placeholder('float64' , [dim] )
        lowerCAmelCase_ = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(a_ , a_ ) )
        ##Variables for cluster assignments of individual vectors (initialized
        ##to 0 at first)
        lowerCAmelCase_ = [tf.Variable(0 ) for i in range(len(a_ ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        lowerCAmelCase_ = tf.placeholder('int32' )
        lowerCAmelCase_ = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(a_ , a_ ) )
        ##Now let's construct the node that will compute the mean
        # The placeholder for the input
        lowerCAmelCase_ = tf.placeholder('float' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        lowerCAmelCase_ = tf.reduce_mean(a_ , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        lowerCAmelCase_ = tf.placeholder('float' , [dim] )
        lowerCAmelCase_ = tf.placeholder('float' , [dim] )
        lowerCAmelCase_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(a_ , a_ ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        lowerCAmelCase_ = tf.placeholder('float' , [noofclusters] )
        lowerCAmelCase_ = tf.argmin(a_ , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        lowerCAmelCase_ = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(a_ )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        lowerCAmelCase_ = 100
        for _ in range(a_ ):
            ##EXPECTATION STEP
            ##Based on the centroid locations up to the last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(a_ ) ):
                lowerCAmelCase_ = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
lowerCAmelCase_ = [ sess.run(a_ , feed_dict={va: vect, va: sess.run(a_ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input lowerCAmelCase_ = sess.run( a_ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(a_ ): # Collect all the vectors assigned to this cluster lowerCAmelCase_ = [ vectors[i] for i in range(len(a_ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location lowerCAmelCase_ = sess.run( a_ , feed_dict={mean_input: array(a_ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments lowerCAmelCase_ = sess.run(a_ ) lowerCAmelCase_ = sess.run(a_ ) return centroids, assignments
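The TF1 graph above spells out one E step (assign each vector to its nearest centroid) and one M step (move each centroid to the mean of its cluster), repeated for a fixed number of iterations. The same loop in plain NumPy, as an illustrative sketch with made-up names and data.

import numpy as np


def kmeans(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # E step: index of the nearest centroid for every vector
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # M step: move each centroid to the mean of its assigned vectors
        for cluster in range(k):
            members = vectors[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments


points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
centroids, assignments = kmeans(points, k=2)
print(assignments)  # one label for the origin pair, another for the (5, 5) pair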
14
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class a_ ( a_ ): '''simple docstring''' __a: str = ['''vqvae'''] def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' lowerCAmelCase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase_ , device=self.device , ) lowerCAmelCase_ = noise lowerCAmelCase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_ , lowercase_ ) lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ ) lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample( generator=lowercase_ )[0] lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ = int(mask_start_secs * pixels_per_second ) lowerCAmelCase_ = int(mask_end_secs * pixels_per_second ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase_ ): lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample'] else: lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] if isinstance(self.scheduler , lowercase_ ): lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample'] else: lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , 
)['prev_sample'] if mask is not None: if mask_start > 0: lowerCAmelCase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample'] lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' ) lowerCAmelCase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) ) lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ = 1 - alpha_prod_t lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor: '''simple docstring''' lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
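The static slerp at the end interpolates along the great circle between two noise tensors: sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta), where theta is the angle between them. A worked NumPy version of the same formula; the inputs are illustrative, and it assumes the tensors are neither zero nor parallel.

import numpy as np


def slerp(alpha: float, x0: np.ndarray, x1: np.ndarray) -> np.ndarray:
    # angle between the two flattened tensors
    cos_theta = np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    theta = np.arccos(cos_theta)
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)


x0 = np.array([1.0, 0.0])
x1 = np.array([0.0, 1.0])
print(slerp(0.5, x0, x1))  # [0.7071 0.7071], still on the unit circle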
14
1
14
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]: def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ): lowerCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase_ = math.ceil(val / multiple ) * multiple return x lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = output_size # determine new height and width lowerCAmelCase_ = output_height / input_height lowerCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase_ = scale_width else: # fit height lowerCAmelCase_ = scale_height lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ ) lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ ) return (new_height, new_width) class a_ ( a_ ): '''simple docstring''' __a: Union[str, Any] = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowerCAmelCase_ = get_resize_output_image_size( lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict: '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: '''simple docstring''' lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowerCAmelCase_ = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowercase_ ): lowerCAmelCase_ = target_sizes.numpy() lowerCAmelCase_ = [] for idx in range(len(lowercase_ ) ): lowerCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ ) lowerCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowerCAmelCase_ = logits.argmax(dim=1 ) lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
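# The interaction of `keep_aspect_ratio` and `ensure_multiple_of` is the
# subtle part of the resize above. A minimal standalone sketch (simplified:
# the real helper also honors a max_val cap; names and numbers illustrative):
import math


def constrain_to_multiple_of(val, multiple, min_val=0):
    # round to the nearest multiple, never dropping below min_val
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


def fitted_output_size(input_hw, target_hw, multiple):
    (ih, iw), (oh, ow) = input_hw, target_hw
    scale_h, scale_w = oh / ih, ow / iw
    # keep aspect ratio: apply whichever scale changes the image the least
    scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return (
        constrain_to_multiple_of(scale * ih, multiple),
        constrain_to_multiple_of(scale * iw, multiple),
    )


# a 480x640 image aimed at 384x384 with multiple=32 keeps its aspect ratio:
assert fitted_output_size((480, 640), (384, 384), 32) == (384, 512)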
14
1
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class a_ ( a_ , a_ ): '''simple docstring''' @register_to_config def __init__( self , lowercase_ = 7_6_8 , ) -> Optional[Any]: '''simple docstring''' super().__init__() lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , lowercase_ ) ) lowerCAmelCase_ = nn.Parameter(torch.ones(1 , lowercase_ ) ) def _lowercase ( self , lowercase_ = None , lowercase_ = None , ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = nn.Parameter(self.mean.to(lowercase_ ).to(lowercase_ ) ) lowerCAmelCase_ = nn.Parameter(self.std.to(lowercase_ ).to(lowercase_ ) ) return self def _lowercase ( self , lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = (embeds - self.mean) * 1.0 / self.std return embeds def _lowercase ( self , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = (embeds * self.std) + self.mean return embeds
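# A quick round-trip check of the scale/unscale pair above (illustrative
# values; in the model the mean and std are learned parameters):
import torch

mean, std = torch.zeros(1, 768), torch.ones(1, 768) * 2.0
embeds = torch.randn(4, 768)
scaled = (embeds - mean) * 1.0 / std
assert torch.allclose(scaled * std + mean, embeds)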
14
import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor lowerCamelCase_ = logging.get_logger(__name__) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> None: '''simple docstring''' warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
14
1
from __future__ import annotations def lowerCamelCase ( a_ , a_ = None , a_ = None , a_ = False , ) -> tuple[int, float, str]: lowerCAmelCase_ = cipher_alphabet or [chr(a_ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCAmelCase_ = { 'a': 0.08_497, 'b': 0.01_492, 'c': 0.02_202, 'd': 0.04_253, 'e': 0.11_162, 'f': 0.02_228, 'g': 0.02_015, 'h': 0.06_094, 'i': 0.07_546, 'j': 0.00_153, 'k': 0.01_292, 'l': 0.04_025, 'm': 0.02_406, 'n': 0.06_749, 'o': 0.07_507, 'p': 0.01_929, 'q': 0.00_095, 'r': 0.07_587, 's': 0.06_327, 't': 0.09_356, 'u': 0.02_758, 'v': 0.00_978, 'w': 0.02_560, 'x': 0.00_150, 'y': 0.01_994, 'z': 0.00_077, } else: # Custom frequencies dictionary lowerCAmelCase_ = frequencies_dict if not case_sensitive: lowerCAmelCase_ = ciphertext.lower() # Chi squared statistic values lowerCAmelCase_ = {} # cycle through all of the shifts for shift in range(len(a_ ) ): lowerCAmelCase_ = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCAmelCase_ = (alphabet_letters.index(letter.lower() ) - shift) % len( a_ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCAmelCase_ = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCAmelCase_ = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase_ = decrypted_with_shift.lower().count(a_ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase_ = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase_ = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase_ = decrypted_with_shift.count(a_ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase_ = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase_ = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCAmelCase_ = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(a_ ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCAmelCase_ = min( a_ , key=a_ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
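# For reference, the score the decrypter minimizes is a chi-squared
# goodness-of-fit. A compact sketch of the per-shift statistic in its
# textbook form (expected counts use the full message length; the routine
# above applies a per-occurrence variant of the same idea):
def chi_squared_score(decrypted: str, frequencies: dict) -> float:
    # chi^2 = sum over letters of (observed - expected)^2 / expected
    n = len(decrypted)
    score = 0.0
    for letter, freq in frequencies.items():
        observed = decrypted.count(letter)
        expected = freq * n
        score += (observed - expected) ** 2 / expected
    return score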
14
from __future__ import annotations import queue class a_ : '''simple docstring''' def __init__( self , lowercase_ ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = data lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCamelCase ( ) -> TreeNode: print('\n********Press N to stop entering at any point of time********\n' ) lowerCAmelCase_ = input('Enter the value of the root node: ' ).strip().lower() lowerCAmelCase_ = queue.Queue() lowerCAmelCase_ = TreeNode(int(a_ ) ) q.put(a_ ) while not q.empty(): lowerCAmelCase_ = q.get() lowerCAmelCase_ = F'''Enter the left node of {node_found.data}: ''' lowerCAmelCase_ = input(a_ ).strip().lower() or 'n' if check == "n": return tree_node lowerCAmelCase_ = TreeNode(int(a_ ) ) lowerCAmelCase_ = left_node q.put(a_ ) lowerCAmelCase_ = F'''Enter the right node of {node_found.data}: ''' lowerCAmelCase_ = input(a_ ).strip().lower() or 'n' if check == "n": return tree_node lowerCAmelCase_ = TreeNode(int(a_ ) ) lowerCAmelCase_ = right_node q.put(a_ ) raise def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return print(node.data , end=',' ) pre_order(node.left ) pre_order(node.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return in_order(node.left ) print(node.data , end=',' ) in_order(node.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=',' ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = queue.Queue() q.put(a_ ) while not q.empty(): lowerCAmelCase_ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = queue.Queue() q.put(a_ ) while not q.empty(): lowerCAmelCase_ = [] while not q.empty(): lowerCAmelCase_ = q.get() print(node_dequeued.data , end=',' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(a_ ) def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = [] lowerCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',' ) stack.append(a_ ) lowerCAmelCase_ = n.left # end of while means current node doesn't have left child lowerCAmelCase_ = stack.pop() # start to traverse its right child lowerCAmelCase_ = n.right def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ = [] lowerCAmelCase_ = node while n or stack: while n: stack.append(a_ ) lowerCAmelCase_ = n.left lowerCAmelCase_ = stack.pop() print(n.data , end=',' ) lowerCAmelCase_ = n.right def lowerCamelCase ( a_ ) -> None: if not isinstance(a_ , a_ ) or not node: return lowerCAmelCase_ , lowerCAmelCase_ = [], [] lowerCAmelCase_ = node stacka.append(a_ ) while stacka: # to find the reversed order of post order, store it in stack2 lowerCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(a_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',' ) def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str: if not s: return "\n" + width * char lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 ) return F'''{left * char} {s} {(left + 
extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) lowerCamelCase_ = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 5_0 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
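# The builder above is interactive; for quick testing a small tree can be
# wired up directly (a sketch using the TreeNode class and the traversal
# names that the demo block calls):
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)    # 1,2,3,
in_order(root)     # 2,1,3,
post_order(root)   # 2,3,1,
level_order(root)  # 1,2,3,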
14
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase_ = { """configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""], """processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""VisionTextDualEncoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""FlaxVisionTextDualEncoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""TFVisionTextDualEncoderModel"""] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
14
import base64


def base85_encode(string: str) -> bytes:
    # the obfuscated `baseaa.baaencode` de-obfuscates to `base64.b85encode`;
    # the original body also referenced `string`, so the parameter name is restored
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
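# One-line round-trip check for the pair above:
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"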
14
1
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))  # [4, 10, 18]
14
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int: if attention_mask is None: lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class a_ : '''simple docstring''' __a: Tuple = OPTConfig __a: Optional[Any] = {} __a: Tuple = '''gelu''' def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any: '''simple docstring''' lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = embed_dim lowerCAmelCase_ = word_embed_proj_dim lowerCAmelCase_ = False def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , ) lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ ) return config, inputs_dict def _lowercase ( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = TFOPTModel(config=lowercase_ ) lowerCAmelCase_ = inputs_dict['input_ids'] lowerCAmelCase_ = input_ids[:1, :] lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :] lowerCAmelCase_ = 1 # first forward pass lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase_ = 
tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0] lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 ) @require_tf class a_ ( a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else () __a: Union[str, Any] = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) __a: int = False __a: List[Any] = False __a: Dict = False __a: List[Any] = 1_0 def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = TFOPTModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase_ , lowercase_ ): if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings lowerCAmelCase_ = model_class(config=lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
lowerCAmelCase_ = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase_ ) # check that weights remain the same after resizing lowerCAmelCase_ = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase_ ) lowerCAmelCase_ = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) def lowerCamelCase ( a_ ) -> Any: return tf.constant(a_ , dtype=tf.intaa ) @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' __a: Optional[int] = 9_9 def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) lowerCAmelCase_ = input_ids.shape[0] lowerCAmelCase_ = OPTConfig( vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' ) lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id ) with tf.GradientTape(): lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state lowerCAmelCase_ = (1, 1_1, 5_1_2) self.assertEqual(output.shape , lowercase_ ) lowerCAmelCase_ = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowerCAmelCase_ = 'facebook/opt-350m' def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model ) lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model ) lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowerCAmelCase_ = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], 
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-125m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) lowerCAmelCase_ = 'left' # use different length sentences to test batching lowerCAmelCase_ = [ 'Hello, my dog is a little', 'Today, I', ] lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ ) lowerCAmelCase_ = inputs['input_ids'] lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] ) lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ ) lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = 
TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ )
14
1
from __future__ import annotations import pandas as pd def lowerCamelCase ( a_ , a_ , a_ ) -> list[int]: lowerCAmelCase_ = [0] * no_of_processes lowerCAmelCase_ = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(a_ ): lowerCAmelCase_ = burst_time[i] lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 lowerCAmelCase_ = 999_999_999 lowerCAmelCase_ = 0 lowerCAmelCase_ = False # Process until all processes are completed while complete != no_of_processes: for j in range(a_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: lowerCAmelCase_ = remaining_time[j] lowerCAmelCase_ = j lowerCAmelCase_ = True if not check: increment_time += 1 continue remaining_time[short] -= 1 lowerCAmelCase_ = remaining_time[short] if minm == 0: lowerCAmelCase_ = 999_999_999 if remaining_time[short] == 0: complete += 1 lowerCAmelCase_ = False # Find finish time of current process lowerCAmelCase_ = increment_time + 1 # Calculate waiting time lowerCAmelCase_ = finish_time - arrival_time[short] lowerCAmelCase_ = finar - burst_time[short] if waiting_time[short] < 0: lowerCAmelCase_ = 0 # Increment time increment_time += 1 return waiting_time def lowerCamelCase ( a_ , a_ , a_ ) -> list[int]: lowerCAmelCase_ = [0] * no_of_processes for i in range(a_ ): lowerCAmelCase_ = burst_time[i] + waiting_time[i] return turn_around_time def lowerCamelCase ( a_ , a_ , a_ ) -> None: lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 for i in range(a_ ): lowerCAmelCase_ = total_waiting_time + waiting_time[i] lowerCAmelCase_ = total_turn_around_time + turn_around_time[i] print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' ) print('Average turn around time =' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("""Enter how many process you want to analyze""") lowerCamelCase_ = int(input()) lowerCamelCase_ = [0] * no_of_processes lowerCamelCase_ = [0] * no_of_processes lowerCamelCase_ = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("""Enter the arrival time and burst time for process:--""" + str(i + 1)) lowerCamelCase_ , lowerCamelCase_ = map(int, input().split()) lowerCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase_ = burst_time lowerCamelCase_ = no_of_processes lowerCamelCase_ = waiting_time lowerCamelCase_ = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) lowerCamelCase_ = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ """Process""", """BurstTime""", """ArrivalTime""", """WaitingTime""", """TurnAroundTime""", ], ) # Printing the dataFrame pd.set_option("""display.max_rows""", fcfs.shape[0] + 1) print(fcfs)
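# The preemptive loop above is easier to follow on a tiny instance. A
# self-contained restatement with descriptive names, checked on two
# processes with arrival times [0, 1] and burst times [4, 2]:
def srtf_waiting_times(arrival, burst):
    n, t, done = len(burst), 0, 0
    remaining = list(burst)
    waiting = [0] * n
    while done < n:
        ready = [i for i in range(n) if arrival[i] <= t and remaining[i] > 0]
        if not ready:
            t += 1  # CPU idles until the next arrival
            continue
        i = min(ready, key=lambda j: remaining[j])  # shortest remaining time
        remaining[i] -= 1
        t += 1
        if remaining[i] == 0:
            done += 1
            waiting[i] = t - arrival[i] - burst[i]  # finish - arrival - burst
    return waiting


# P2 preempts P1 at t=1 and finishes at t=3; P1 resumes and finishes at t=6.
assert srtf_waiting_times([0, 1], [4, 2]) == [2, 0]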
14
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    # function name reconstructed from the module's intent; the loop follows
    # the Adler-32 definition: a = 1 + sum(bytes), b = sum of successive a's
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
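# Sanity check against the widely quoted reference value:
assert adler32("Wikipedia") == 0x11E60398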
14
1
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase ( a_ ) -> int: if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(a_ ): return ext raise Exception( F'''Unable to determine file format from file extension {path}. ''' F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def lowerCamelCase ( a_ ) -> Any: lowerCAmelCase_ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) lowerCAmelCase_ = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format lowerCAmelCase_ = PipelineDataFormat.from_str( format=a_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(a_ , a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> int: '''simple docstring''' lowerCAmelCase_ = nlp lowerCAmelCase_ = reader @staticmethod def _lowercase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = parser.add_parser('run' , help='Run a pipeline through the CLI' ) run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' ) run_parser.add_argument('--input' , type=lowercase_ , help='Path to the file to use for inference' ) run_parser.add_argument('--output' , type=lowercase_ , help='Path to the file that will be used post to write results.' ) run_parser.add_argument('--model' , type=lowercase_ , help='Name or path to the model to instantiate.' ) run_parser.add_argument('--config' , type=lowercase_ , help='Name or path to the model\'s config to instantiate.' ) run_parser.add_argument( '--tokenizer' , type=lowercase_ , help='Name of the tokenizer to use. (default: same as the model name)' ) run_parser.add_argument( '--column' , type=lowercase_ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , ) run_parser.add_argument( '--format' , type=lowercase_ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , ) run_parser.add_argument( '--device' , type=lowercase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' ) run_parser.set_defaults(func=lowercase_ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self._nlp, [] for entry in self._reader: lowerCAmelCase_ = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): outputs.append(lowercase_ ) else: outputs += output # Saving data if self._nlp.binary_output: lowerCAmelCase_ = self._reader.save_binary(lowercase_ ) logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(lowercase_ )
14
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_=False ) -> Tuple: lowerCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): lowerCAmelCase_ = 'segformer.encoder.' + key if key.startswith('backbone' ): lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' ) if "norm" in key: lowerCAmelCase_ = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' ) if "layer_norm1" in key: lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ = key[key.find('block' ) + len('block' )] lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' ) if "attn.q" in key: lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase_ = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase_ = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase_ = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' ) if key.startswith('head' ): lowerCAmelCase_ = key.replace('head' , 'classifier' ) lowerCAmelCase_ = value return new_state_dict def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = 
'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ) return image @torch.no_grad() def lowerCamelCase ( a_ , a_ , a_ ) -> int: lowerCAmelCase_ = SegformerConfig() lowerCAmelCase_ = False # set attributes based on model_name lowerCAmelCase_ = 'huggingface/label-files' if "segformer" in model_name: lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: lowerCAmelCase_ = 150 lowerCAmelCase_ = 'ade20k-id2label.json' lowerCAmelCase_ = (1, 150, 128, 128) elif "city" in model_name: lowerCAmelCase_ = 19 lowerCAmelCase_ = 'cityscapes-id2label.json' lowerCAmelCase_ = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: lowerCAmelCase_ = True lowerCAmelCase_ = model_name[4:6] lowerCAmelCase_ = 1_000 lowerCAmelCase_ = 'imagenet-1k-id2label.json' lowerCAmelCase_ = (1, 1_000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 256 elif size == "b2": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 6, 3] elif size == "b3": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 18, 3] elif size == "b4": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 8, 27, 3] elif size == "b5": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) lowerCAmelCase_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ ) # prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) ) else: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(a_ , a_ ) # create HuggingFace model and load state dict if encoder_only: lowerCAmelCase_ = False lowerCAmelCase_ = SegformerForImageClassification(a_ ) else: lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass lowerCAmelCase_ = model(a_ ) lowerCAmelCase_ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.5_820, 
-8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCAmelCase_ = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], 
[-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: lowerCAmelCase_ = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
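# The renaming above is mechanical string surgery on checkpoint keys. The
# pattern used for indexed submodules, shown in isolation (hypothetical key):
key = "patch_embed3.proj.weight"
idx = key[key.find("patch_embed") + len("patch_embed")]
assert key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}") == (
    "patch_embeddings.2.proj.weight"
)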
14
1
def check_cycle(graph: dict) -> bool:
    # outer function name reconstructed; the dangling references below
    # (visited, rec_stk, graph, vertex) pin the remaining names
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
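# Quick checks for the functions above: a 3-cycle is flagged, a DAG is not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False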
14
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class a_ ( a_ , a_ ): '''simple docstring''' __a: Optional[Any] = '''nat''' __a: int = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = depths lowerCAmelCase_ = len(lowercase_ ) lowerCAmelCase_ = num_heads lowerCAmelCase_ = kernel_size lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = hidden_act lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) lowerCAmelCase_ = layer_scale_init_value lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )] lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
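# The derived `hidden_size` above doubles the embedding width once per stage
# after the first; with the defaults (embed_dim=64, four stages):
embed_dim, depths = 64, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512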
14
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
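# Why the one-liner above reproduces itself: %r re-inserts the template with
# quotes and escapes intact, and %% collapses to a literal % when formatted.
template = 'print((lambda quine: quine %% quine)(%r))'
assert template % template == f'print((lambda quine: quine % quine)({template!r}))'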
14
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) lowerCamelCase_ = """pytorch_model.bin""" lowerCamelCase_ = """pytorch_model.bin.index.json""" lowerCamelCase_ = """adapter_config.json""" lowerCamelCase_ = """adapter_model.bin""" lowerCamelCase_ = """adapter_model.safetensors""" lowerCamelCase_ = """tf_model.h5""" lowerCamelCase_ = """tf_model.h5.index.json""" lowerCamelCase_ = """model.ckpt""" lowerCamelCase_ = """flax_model.msgpack""" lowerCamelCase_ = """flax_model.msgpack.index.json""" lowerCamelCase_ = """model.safetensors""" lowerCamelCase_ = """model.safetensors.index.json""" lowerCamelCase_ = """config.json""" lowerCamelCase_ = """preprocessor_config.json""" lowerCamelCase_ = FEATURE_EXTRACTOR_NAME lowerCamelCase_ = """generation_config.json""" lowerCamelCase_ = """modelcard.json""" lowerCamelCase_ = """▁""" lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility lowerCamelCase_ = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def lowerCamelCase ( a_ ) -> Dict: if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: lowerCAmelCase_ = ( 'This example requires a source install from HuggingFace Transformers (see ' '`https://huggingface.co/docs/transformers/installation#install-from-source`),' ) else: lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other ' 'versions of HuggingFace Transformers.' )
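# The version gate above relies on `packaging`'s PEP 440 ordering, under
# which a dev release of a newer version still outranks an older stable one:
from packaging import version

assert version.parse("4.30.0") < version.parse("4.31.0.dev0")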
14
1
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    # function name reconstructed; the dangling references (index, ratio,
    # weight, capacity, max_value, fractions) pin the remaining names
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
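# Worked instance of the greedy above: ratios [6, 5, 4] mean items 0 and 1
# are taken whole plus two thirds of item 2, for 60 + 100 + 80 = 240.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 20 / 30]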
14
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCamelCase_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCamelCase ( a_ ) -> List[str]: if isinstance(a_ , torch.Tensor ): return image elif isinstance(a_ , PIL.Image.Image ): lowerCAmelCase_ = [image] lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image] lowerCAmelCase_ = torch.stack(a_ ) return image class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ ) lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 ) lowerCAmelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple: '''simple docstring''' if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' ) lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase_ = init_latents.shape lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print('add noise to latents at timestep' , lowercase_ ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) lowerCAmelCase_ = init_latents return latents @torch.no_grad() def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(lowercase_ ) # 2. Preprocess image lowerCAmelCase_ = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device ) lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ ) # 4. Prepare latent variables lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) lowerCAmelCase_ = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
14
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: int = StableDiffusionInpaintPipeline __a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a: int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a: List[str] = frozenset([] ) def _lowercase ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) lowerCAmelCase_ = CLIPTextModel(lowercase_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int: '''simple docstring''' lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) ) lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) ) if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ ) lowerCAmelCase_ = 
sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = sd_pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' ) lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ) lowerCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
14
def lowerCamelCase ( a_ ) -> "list[int]": if upper_limit < 0: raise ValueError('Limit for the Catalan sequence must be ≥ 0' ) lowerCAmelCase_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowerCAmelCase_ = 1 if upper_limit > 0: lowerCAmelCase_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(a_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowerCamelCase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { """configuration_clap""": [ """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""", """ClapAudioConfig""", """ClapConfig""", """ClapTextConfig""", ], """processing_clap""": ["""ClapProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""", """ClapModel""", """ClapPreTrainedModel""", """ClapTextModel""", """ClapTextModelWithProjection""", """ClapAudioModel""", """ClapAudioModelWithProjection""", ] lowerCamelCase_ = ["""ClapFeatureExtractor"""] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = {}, {} if padding is not None: lowerCAmelCase_ = padding if truncation is not None: lowerCAmelCase_ = truncation if top_k is not None: lowerCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int: '''simple docstring''' if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): lowerCAmelCase_ = {'image': image, 'question': question} else: lowerCAmelCase_ = image lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ ) return results def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = load_image(inputs['image'] ) lowerCAmelCase_ = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def _lowercase ( self , lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.model(**lowercase_ ) return model_outputs def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any: '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.sigmoid()[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
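A hedged usage sketch for the pipeline class above via the transformers factory; "visual-question-answering" is the registered task name, while the image path below is only a placeholder.

from transformers import pipeline

vqa = pipeline("visual-question-answering")  # loads a default VQA checkpoint
predictions = vqa(image="photo.jpg", question="What is on the table?", top_k=3)
for pred in predictions:
    print(f"{pred['answer']}: {pred['score']:.3f}")  # answers sorted by score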
14
1
import baseaa def lowerCamelCase ( a_ ) -> bytes: return baseaa.baaencode(a_.encode('utf-8' ) ) def lowerCamelCase ( a_ ) -> str: return baseaa.baadecode(a_ ).decode('utf-8' ) if __name__ == "__main__": lowerCamelCase_ = """Hello World!""" lowerCamelCase_ = baseaa_encode(test) print(encoded) lowerCamelCase_ = baseaa_decode(encoded) print(decoded)
14
def lowerCamelCase ( a_ ) -> bool: lowerCAmelCase_ = set() # To detect a back edge, keep track of vertices currently in the recursion stack lowerCAmelCase_ = set() return any( node not in visited and depth_first_search(a_ , a_ , a_ , a_ ) for node in graph ) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool: visited.add(a_ ) rec_stk.add(a_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(a_ , a_ , a_ , a_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(a_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
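A usage sketch for the back-edge cycle check above, rewritten with readable names (check_cycle and depth_first_search are assumptions) and driven on a small adjacency mapping.

def check_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stk: set = set()  # vertices on the current recursion path
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )

def depth_first_search(graph, vertex, visited, rec_stk) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:  # back edge: a cycle
            return True
    rec_stk.remove(vertex)  # vertex leaves the active recursion path
    return False

print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True  (0 -> 1 -> 2 -> 0)
print(check_cycle({0: [1], 1: [2], 2: []}))   # False (acyclic)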
14
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase_ = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""OwlViTFeatureExtractor"""] lowerCamelCase_ = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: int = StableDiffusionInpaintPipeline __a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a: int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a: List[str] = frozenset([] ) def _lowercase ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) lowerCAmelCase_ = CLIPTextModel(lowercase_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int: '''simple docstring''' lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) ) lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) ) if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ ) lowerCAmelCase_ = 
sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = sd_pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' ) lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ) lowerCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
14
1
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCamelCase_ = logging.get_logger(__name__) class a_ ( a_ ): '''simple docstring''' __a: List[Any] = ['''input_values''', '''attention_mask'''] def __init__( self , lowercase_ = 1 , lowercase_ = 1_6_0_0_0 , lowercase_ = 0.0 , lowercase_ = False , lowercase_ = 8_0 , lowercase_ = 1_6 , lowercase_ = 6_4 , lowercase_ = "hann_window" , lowercase_ = 1.0 , lowercase_ = 8_0 , lowercase_ = 7_6_0_0 , lowercase_ = 1e-10 , lowercase_ = 2 , lowercase_ = True , **lowercase_ , ) -> Dict: '''simple docstring''' super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ ) lowerCAmelCase_ = do_normalize lowerCAmelCase_ = return_attention_mask lowerCAmelCase_ = num_mel_bins lowerCAmelCase_ = hop_length lowerCAmelCase_ = win_length lowerCAmelCase_ = win_function lowerCAmelCase_ = frame_signal_scale lowerCAmelCase_ = fmin lowerCAmelCase_ = fmax lowerCAmelCase_ = mel_floor lowerCAmelCase_ = reduction_factor lowerCAmelCase_ = win_length * sampling_rate // 1_0_0_0 lowerCAmelCase_ = hop_length * sampling_rate // 1_0_0_0 lowerCAmelCase_ = optimal_fft_length(self.sample_size ) lowerCAmelCase_ = (self.n_fft // 2) + 1 lowerCAmelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase_ ) lowerCAmelCase_ = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _lowercase ( lowercase_ , lowercase_ , lowercase_ = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: lowerCAmelCase_ = np.array(lowercase_ , np.intaa ) lowerCAmelCase_ = [] for vector, length in zip(lowercase_ , attention_mask.sum(-1 ) ): lowerCAmelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowerCAmelCase_ = padding_value normed_input_values.append(lowercase_ ) else: lowerCAmelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def _lowercase ( self , lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = spectrogram( lowercase_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature: '''simple docstring''' if audio is None and audio_target is 
None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: lowerCAmelCase_ = self._process_audio( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , ) else: lowerCAmelCase_ = None if audio_target is not None: lowerCAmelCase_ = self._process_audio( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , ) if inputs is None: return inputs_target else: lowerCAmelCase_ = inputs_target['input_values'] lowerCAmelCase_ = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: lowerCAmelCase_ = decoder_attention_mask return inputs def _lowercase ( self , lowercase_ , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature: '''simple docstring''' lowerCAmelCase_ = isinstance(lowercase_ , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowerCAmelCase_ = is_batched_numpy or ( isinstance(lowercase_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(lowercase_ , np.ndarray ): lowerCAmelCase_ = np.asarray(lowercase_ , dtype=np.floataa ) elif isinstance(lowercase_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ = speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ = [speech] # needed to make pad() work on spectrogram inputs lowerCAmelCase_ = self.feature_size # convert into correct format for padding if is_target: lowerCAmelCase_ = [self._extract_mel_features(lowercase_ ) for waveform in speech] lowerCAmelCase_ = BatchFeature({'input_values': features} ) lowerCAmelCase_ = self.num_mel_bins else: lowerCAmelCase_ = BatchFeature({'input_values': speech} ) lowerCAmelCase_ = self.pad( lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = feature_size_hack # convert input values to correct format lowerCAmelCase_ = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for array in input_values] elif ( not isinstance(lowercase_ , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): lowerCAmelCase_ = [array.astype(np.floataa ) for array in input_values] elif isinstance(lowercase_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): lowerCAmelCase_ = input_values.astype(np.floataa ) # convert attention_mask to correct format lowerCAmelCase_ = 
padded_inputs.get('attention_mask' ) if attention_mask is not None: lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: lowerCAmelCase_ = ( attention_mask if self._get_padding_strategies(lowercase_ , max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCAmelCase_ = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=lowercase_ , padding_value=self.padding_value ) if return_tensors is not None: lowerCAmelCase_ = padded_inputs.convert_to_tensors(lowercase_ ) return padded_inputs def _lowercase ( self ) -> Dict[str, Any]: '''simple docstring''' lowerCAmelCase_ = super().to_dict() # Don't serialize these as they are derived from the other properties. lowerCAmelCase_ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
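A hedged usage sketch for the feature extractor above, which matches SpeechT5's: audio= yields raw input_values for the speech encoder, audio_target= yields log-mel spectrograms; the zero-argument construction relies on the defaults in the signature above.

import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()         # defaults: 16 kHz, 80 mel bins
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)            # (1, 16000): raw waveform values
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)           # (1, num_frames, 80): log-mel features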
14
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class a_ : '''simple docstring''' __a: int __a: int class a_ : '''simple docstring''' def __init__( self , lowercase_ ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = [[] for _ in range(lowercase_ )] lowerCAmelCase_ = size def __getitem__( self , lowercase_ ) -> Iterator[Edge]: '''simple docstring''' return iter(self._graph[vertex] ) @property def _lowercase ( self ) -> List[Any]: '''simple docstring''' return self._size def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) ) def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None: '''simple docstring''' lowerCAmelCase_ = deque([start_vertex] ) lowerCAmelCase_ = [None] * self.size lowerCAmelCase_ = 0 while queue: lowerCAmelCase_ = queue.popleft() lowerCAmelCase_ = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowerCAmelCase_ = current_distance + edge.weight lowerCAmelCase_ = distances[edge.destination_vertex] if ( isinstance(lowercase_ , lowercase_ ) and new_distance >= dest_vertex_distance ): continue lowerCAmelCase_ = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
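A self-contained sketch of the same 0-1 BFS idea as the class above: weight-0 edges go to the front of the deque and weight-1 edges to the back, so vertices are processed in distance order; the names here are illustrative, not from the source.

from collections import deque

def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int) -> list[float]:
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:  # w must be 0 or 1
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    queue.appendleft(v)  # same distance: process first
                else:
                    queue.append(v)
    return dist

adj = [[(1, 0), (3, 1)], [(2, 1)], [], [(2, 0)]]
print(zero_one_bfs(adj, 0))  # [0, 0, 1, 1]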
14
1
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase ( *a_ ) -> List[Any]: with open(a_ , 'r' ) as fh: fcntl.flock(a_ , fcntl.LOCK_EX ) try: print(*a_ ) finally: fcntl.flock(a_ , fcntl.LOCK_UN ) lowerCamelCase_ = int(os.environ["""LOCAL_RANK"""]) torch.cuda.set_device(local_rank) lowerCamelCase_ = torch.device("""cuda""", local_rank) lowerCamelCase_ = socket.gethostname() lowerCamelCase_ = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group("""nccl""") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank lowerCamelCase_ = dist.get_rank() lowerCamelCase_ = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
14
from __future__ import annotations lowerCamelCase_ = 1_0 def lowerCamelCase ( a_ ) -> list[int]: lowerCAmelCase_ = 1 lowerCAmelCase_ = max(a_ ) while placement <= max_digit: # declare and initialize empty buckets lowerCAmelCase_ = [[] for _ in range(a_ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCAmelCase_ = int((i / placement) % RADIX ) buckets[tmp].append(a_ ) # put each buckets' contents into list_of_ints lowerCAmelCase_ = 0 for b in range(a_ ): for i in buckets[b]: lowerCAmelCase_ = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
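A readable rendering of the least-significant-digit radix sort above (the names are assumptions); integer floor division replaces the float digit extraction for exactness on large inputs.

RADIX = 10

def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        for i in list_of_ints:
            buckets[(i // placement) % RADIX].append(i)  # bucket by current digit
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        placement *= RADIX  # move to the next digit
    return list_of_ints

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]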
14
1
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : '''simple docstring''' __a: int __a: Node | None = None __a: Node | None = None def lowerCamelCase ( ) -> Node | None: lowerCAmelCase_ = Node(1 ) lowerCAmelCase_ = Node(2 ) lowerCAmelCase_ = Node(3 ) lowerCAmelCase_ = Node(4 ) lowerCAmelCase_ = Node(5 ) return tree def lowerCamelCase ( a_ ) -> list[int]: return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowerCamelCase ( a_ ) -> list[int]: return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowerCamelCase ( a_ ) -> list[int]: return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowerCamelCase ( a_ ) -> int: return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowerCamelCase ( a_ ) -> Sequence[Node | None]: lowerCAmelCase_ = [] if root is None: return output lowerCAmelCase_ = deque([root] ) while process_queue: lowerCAmelCase_ = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowerCamelCase ( a_ , a_ ) -> Sequence[Node | None]: lowerCAmelCase_ = [] def populate_output(a_ , a_ ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(a_ , a_ ) return output def lowerCamelCase ( a_ , a_ ) -> Sequence[Node | None]: lowerCAmelCase_ = [] def populate_output(a_ , a_ ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(a_ , a_ ) return output def lowerCamelCase ( a_ ) -> Sequence[Node | None] | list[Any]: if root is None: return [] lowerCAmelCase_ = [] lowerCAmelCase_ = 0 lowerCAmelCase_ = height(a_ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(a_ , a_ ) ) lowerCAmelCase_ = 1 else: output.append(get_nodes_from_right_to_left(a_ , a_ ) ) lowerCAmelCase_ = 0 return output def lowerCamelCase ( ) -> None: # Main function for testing. lowerCAmelCase_ = make_tree() print(F'''In-order Traversal: {inorder(a_ )}''' ) print(F'''Pre-order Traversal: {preorder(a_ )}''' ) print(F'''Post-order Traversal: {postorder(a_ )}''' , '\n' ) print(F'''Height of Tree: {height(a_ )}''' , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(a_ ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(a_ ) + 1 ): print(F'''Level {level}:''' , get_nodes_from_left_to_right(a_ , level=a_ ) ) print('\nZigZag order Traversal: ' ) print(zigzag(a_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]: # load base model lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowerCAmelCase_ = load_file(a_ ) lowerCAmelCase_ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.text_encoder else: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.unet # find the target layer lowerCAmelCase_ = layer_infos.pop(0 ) while len(a_ ) > -1: try: lowerCAmelCase_ = curr_layer.__getattr__(a_ ) if len(a_ ) > 0: lowerCAmelCase_ = layer_infos.pop(0 ) elif len(a_ ) == 0: break except Exception: if len(a_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowerCAmelCase_ = layer_infos.pop(0 ) lowerCAmelCase_ = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(a_ ) else: pair_keys.append(a_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 ) else: lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ) # update visited list for item in pair_keys: visited.append(a_ ) return pipeline if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = args.base_model_path lowerCamelCase_ = args.checkpoint_path lowerCamelCase_ = args.dump_path lowerCamelCase_ = args.lora_prefix_unet lowerCamelCase_ = args.lora_prefix_text_encoder lowerCamelCase_ = args.alpha lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCamelCase_ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
14
1
from __future__ import annotations class a_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = text, pattern lowerCAmelCase_ , lowerCAmelCase_ = len(lowercase_ ), len(lowercase_ ) def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _lowercase ( self ) -> list[int]: '''simple docstring''' lowerCAmelCase_ = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase_ = self.mismatch_in_text(lowercase_ ) if mismatch_index == -1: positions.append(lowercase_ ) else: lowerCAmelCase_ = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase_ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowerCamelCase_ = """ABAABA""" lowerCamelCase_ = """AB""" lowerCamelCase_ = BoyerMooreSearch(text, pattern) lowerCamelCase_ = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
14
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase ( a_ ) -> Any: lowerCAmelCase_ = tmp_path / 'file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = tmp_path / 'malformed_file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ , a_ ) -> List[str]: lowerCAmelCase_ = tmp_path / 'csv_with_image.csv' lowerCAmelCase_ = textwrap.dedent( F'''\ image {image_file} ''' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> int: lowerCAmelCase_ = tmp_path / 'csv_with_label.csv' lowerCAmelCase_ = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv' lowerCAmelCase_ = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]: lowerCAmelCase_ = Csv() lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(a_ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(a_ ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase ( a_ ) -> Optional[Any]: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() lowerCAmelCase_ = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase ( a_ ) -> int: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1:] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() lowerCAmelCase_ = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(a_ ) for label in labels] def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(a_ ) for i in x.split()]} ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) lowerCAmelCase_ = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
14
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker lowerCamelCase_ = """CompVis/stable-diffusion-v1-1""" lowerCamelCase_ = """CompVis/stable-diffusion-v1-2""" lowerCamelCase_ = """CompVis/stable-diffusion-v1-3""" lowerCamelCase_ = """CompVis/stable-diffusion-v1-4""" class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Optional[int]: '''simple docstring''' super()._init_() lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ ) lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ ) lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ ) lowerCAmelCase_ = StableDiffusionPipeline( vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , requires_safety_checker=lowercase_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _lowercase ( self ) -> Dict[str, Any]: '''simple docstring''' return {k: getattr(self , lowercase_ ) for k in self.config.keys() if not k.startswith('_' )} def _lowercase ( self , lowercase_ = "auto" ) -> Optional[int]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_ ) def _lowercase ( self ) -> Tuple: '''simple docstring''' self.enable_attention_slicing(lowercase_ ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Union[str, Any]: '''simple docstring''' return self.pipea( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: '''simple docstring''' return self.pipea( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , 
**lowercase_ , ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Optional[Any]: '''simple docstring''' return self.pipea( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Any: '''simple docstring''' return self.pipea( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(lowercase_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCAmelCase_ = self.textaimg_sda_a( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCAmelCase_ = self.textaimg_sda_a( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCAmelCase_ = self.textaimg_sda_a( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) # Get first result from Stable 
Diffusion Checkpoint v1.4 lowerCAmelCase_ = self.textaimg_sda_a( prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
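A hedged usage sketch for the four-checkpoint comparison pipeline above. Loading it through diffusers' custom_pipeline mechanism and the community id "stable_diffusion_comparison" are assumptions about how this class is consumed, as is the name of the comparison method; running it downloads all four v1.x checkpoints.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community pipeline id
    torch_dtype=torch.float16,
)
pipe.to("cuda")
output = pipe.text2img_compare(  # assumed name of the final comparison method
    prompt="an astronaut riding a horse", num_inference_steps=25
)
# output.images holds one image per checkpoint, v1.1 through v1.4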
14
from maths.prime_factors import prime_factors def lowerCamelCase ( a_ ) -> int: if not isinstance(a_ , int ): lowerCAmelCase_ = F'''Input value of [number={a_}] must be an integer''' raise TypeError(a_ ) if a_ < 1: raise ValueError('Input must be a positive integer' ) return -1 if len(prime_factors(a_ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
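A hedged usage note for the Möbius routine above (the public name mobius is an assumption): for square-free inputs the parity of the prime-factor list matches mu(n), but inputs with a squared prime factor, where the true mu(n) is 0, still get plus or minus 1 here.

print(mobius(6))   # 1: 6 = 2 * 3 has an even number of prime factors
print(mobius(30))  # -1: 30 = 2 * 3 * 5 has an odd number
print(mobius(12))  # returns -1, although the true mu(12) is 0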
14
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_6_3_8_4,
        type_vocab_size=1_6,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=1_6_3_8_4,
        local_transformer_stride=1_2_8,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
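For reference, a short sketch of how a config class like the one above is typically instantiated and round-tripped. The `to_dict` round trip relies on the standard `PretrainedConfig` API, which is an assumption about the surrounding library rather than something shown in this file.

# Sketch: instantiate with defaults, override one field, and round-trip via a dict
config = CanineConfig(num_hidden_layers=6)  # smaller than the 12-layer default
print(config.num_hidden_layers, config.hidden_size)  # 6 768
restored = CanineConfig(**config.to_dict())  # assumes PretrainedConfig.to_dict()
assert restored.num_hidden_layers == 6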
14
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def lowerCamelCase(a_, a_) -> Tuple:
    lowerCAmelCase_ = XCLIPTextConfig()

    # derive patch size from model name
    lowerCAmelCase_ = model_name.find('patch')
    lowerCAmelCase_ = int(model_name[start_idx + len('patch') : start_idx + len('patch') + 2])
    lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_, num_frames=a_)

    if "large" in model_name:
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = 3_072
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 1_024
        lowerCAmelCase_ = 4_096
        lowerCAmelCase_ = 16
        lowerCAmelCase_ = 24
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = 3_072

    if model_name == "xclip-large-patch14-16-frames":
        lowerCAmelCase_ = 336

    lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_, a_)

    if "large" in model_name:
        lowerCAmelCase_ = 768

    return config


def lowerCamelCase(a_) -> List[str]:
    # text encoder
    if name == "token_embedding.weight":
        lowerCAmelCase_ = name.replace('token_embedding.weight', 'text_model.embeddings.token_embedding.weight')
    if name == "positional_embedding":
        lowerCAmelCase_ = name.replace('positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "ln_1" in name:
        lowerCAmelCase_ = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        lowerCAmelCase_ = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        lowerCAmelCase_ = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        lowerCAmelCase_ = name.replace('c_proj', 'fc2')
    if name.startswith('transformer.resblocks'):
        lowerCAmelCase_ = name.replace('transformer.resblocks', 'text_model.encoder.layers')
    if "attn.out_proj" in name and "message" not in name:
        lowerCAmelCase_ = name.replace('attn.out_proj', 'self_attn.out_proj')
    if "ln_final" in name:
        lowerCAmelCase_ = name.replace('ln_final', 'text_model.final_layer_norm')
    # visual encoder
    if name == "visual.class_embedding":
        lowerCAmelCase_ = name.replace('visual.class_embedding', 'vision_model.embeddings.class_embedding')
    if name == "visual.positional_embedding":
        lowerCAmelCase_ = name.replace('visual.positional_embedding', 'vision_model.embeddings.position_embedding.weight')
    if name.startswith('visual.transformer.resblocks'):
        lowerCAmelCase_ = name.replace('visual.transformer.resblocks', 'vision_model.encoder.layers')
    if "visual.conv1" in name:
        lowerCAmelCase_ = name.replace('visual.conv1', 'vision_model.embeddings.patch_embedding')
    if "visual.ln_pre" in name:
        lowerCAmelCase_ = name.replace('visual.ln_pre', 'vision_model.pre_layernorm')
    if "visual.ln_post" in name:
        lowerCAmelCase_ = name.replace('visual.ln_post', 'vision_model.post_layernorm')
    if "visual.proj" in name:
        lowerCAmelCase_ = name.replace('visual.proj', 'visual_projection.weight')
    if "text_projection" in name:
        lowerCAmelCase_ = name.replace('text_projection', 'text_projection.weight')
    # things on top
    if "prompts_visual_proj" in name:
        lowerCAmelCase_ = name.replace('prompts_visual_proj', 'prompts_visual_projection')
    if "prompts_visual_ln" in name:
        lowerCAmelCase_ = name.replace('prompts_visual_ln', 'prompts_visual_layernorm')
    # mit
    if name == "mit.positional_embedding":
        lowerCAmelCase_ = name.replace('positional', 'position')
    if name.startswith('mit.resblocks'):
        lowerCAmelCase_ = name.replace('mit.resblocks', 'mit.encoder.layers')
    # prompts generator
    if name.startswith('prompts_generator.norm'):
        lowerCAmelCase_ = name.replace('prompts_generator.norm', 'prompts_generator.layernorm')

    return name


def lowerCamelCase(a_, a_) -> Dict:
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ = orig_state_dict.pop(a_)

        if "attn.in_proj" in key:
            lowerCAmelCase_ = key.split('.')
            if key.startswith('visual'):
                lowerCAmelCase_ = key_split[3]
                lowerCAmelCase_ = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        lowerCAmelCase_ = val[:dim, :]
                        lowerCAmelCase_ = val[dim : dim * 2, :]
                        lowerCAmelCase_ = val[-dim:, :]
                    else:
                        lowerCAmelCase_ = val[:dim]
                        lowerCAmelCase_ = val[dim : dim * 2]
                        lowerCAmelCase_ = val[-dim:]
                else:
                    if "weight" in key:
                        lowerCAmelCase_ = val[:dim, :]
                        lowerCAmelCase_ = val[dim : dim * 2, :]
                        lowerCAmelCase_ = val[-dim:, :]
                    else:
                        lowerCAmelCase_ = val[:dim]
                        lowerCAmelCase_ = val[dim : dim * 2]
                        lowerCAmelCase_ = val[-dim:]
            elif key.startswith('mit'):
                lowerCAmelCase_ = key_split[2]
                lowerCAmelCase_ = config.vision_config.mit_hidden_size
                if "weight" in key:
                    lowerCAmelCase_ = val[:dim, :]
                    lowerCAmelCase_ = val[dim : dim * 2, :]
                    lowerCAmelCase_ = val[-dim:, :]
                else:
                    lowerCAmelCase_ = val[:dim]
                    lowerCAmelCase_ = val[dim : dim * 2]
                    lowerCAmelCase_ = val[-dim:]
            else:
                lowerCAmelCase_ = key_split[2]
                lowerCAmelCase_ = config.text_config.hidden_size
                if "weight" in key:
                    lowerCAmelCase_ = val[:dim, :]
                    lowerCAmelCase_ = val[dim : dim * 2, :]
                    lowerCAmelCase_ = val[-dim:, :]
                else:
                    lowerCAmelCase_ = val[:dim]
                    lowerCAmelCase_ = val[dim : dim * 2]
                    lowerCAmelCase_ = val[-dim:]
        else:
            lowerCAmelCase_ = rename_key(a_)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                lowerCAmelCase_ = val.T
            lowerCAmelCase_ = val

    return orig_state_dict


def lowerCamelCase(a_) -> List[str]:
    if num_frames == 8:
        lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        lowerCAmelCase_ = 'eating_spaghetti.npy'
    elif num_frames == 32:
        lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
    lowerCAmelCase_ = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video',
        filename=a_,
        repo_type='dataset',
    )
    lowerCAmelCase_ = np.load(a_)
    return list(a_)


def lowerCamelCase(a_, a_=None, a_=False) -> List[Any]:
    lowerCAmelCase_ = {
        # fully supervised kinetics-400 checkpoints
        'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
        'xclip-base-patch32-16-frames': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
        ),
        'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
        'xclip-base-patch16-16-frames': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
        ),
        'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
        'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
        # fully supervised kinetics-600 checkpoints
        'xclip-base-patch16-kinetics-600': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
        ),
        'xclip-base-patch16-kinetics-600-16-frames': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
        ),
        'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
        # few shot
        'xclip-base-patch16-hmdb-2-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
        ),
        'xclip-base-patch16-hmdb-4-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
        ),
        'xclip-base-patch16-hmdb-8-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
        ),
        'xclip-base-patch16-hmdb-16-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
        ),
        'xclip-base-patch16-ucf-2-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
        ),
        'xclip-base-patch16-ucf-4-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
        ),
        'xclip-base-patch16-ucf-8-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
        ),
        'xclip-base-patch16-ucf-16-shot': (
            'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
        ),
        # zero shot
        'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
    }

    lowerCAmelCase_ = model_to_url[model_name]

    lowerCAmelCase_ = 8
    if "16-frames" in model_name:
        lowerCAmelCase_ = 16
    elif "shot" in model_name:
        lowerCAmelCase_ = 32

    lowerCAmelCase_ = get_xclip_config(a_, a_)
    lowerCAmelCase_ = XCLIPModel(a_)
    model.eval()

    if "drive" in checkpoint_url:
        lowerCAmelCase_ = 'pytorch_model.bin'
        gdown.cached_download(a_, a_, quiet=a_)
        lowerCAmelCase_ = torch.load(a_, map_location='cpu')['model']
    else:
        lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_)['model']

    lowerCAmelCase_ = convert_state_dict(a_, a_)

    lowerCAmelCase_ = XCLIPModel(a_)
    lowerCAmelCase_, lowerCAmelCase_ = model.load_state_dict(a_, strict=a_)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
    lowerCAmelCase_ = VideoMAEImageProcessor(size=a_)
    lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
    lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32')
    lowerCAmelCase_ = XCLIPProcessor(image_processor=a_, tokenizer=a_)

    lowerCAmelCase_ = prepare_video(a_)
    lowerCAmelCase_ = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'], videos=a_, return_tensors='pt', padding=a_
    )

    print('Shape of pixel values:', inputs.pixel_values.shape)

    with torch.no_grad():
        lowerCAmelCase_ = model(**a_)

    # Verify outputs
    lowerCAmelCase_ = outputs.logits_per_video
    lowerCAmelCase_ = logits_per_video.softmax(dim=1)
    print('Probs:', a_)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]])
    elif model_name == "xclip-base-patch32-16-frames":
        lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]])
    elif model_name == "xclip-base-patch16-16-frames":
        lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]])
    elif model_name == "xclip-large-patch14-16-frames":
        lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    assert torch.allclose(a_, a_, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(a_)

    if push_to_hub:
        print('Pushing model, processor and slow tokenizer files to the hub...')
        model.push_to_hub(a_, organization='nielsr')
        processor.push_to_hub(a_, organization='nielsr')
        slow_tokenizer.push_to_hub(a_, organization='nielsr')


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""xclip-base-patch32""",
        type=str,
        help="""Name of the model.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    lowerCamelCase_ = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
1
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
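A quick worked check of the shift-and-add idea above (the helper names were restored here, since both originals collided): each set bit of b contributes a doubled copy of a, so the loop runs O(log b) times.

# 11 = 0b1011, so 13 * 11 = 13 + 26 + 104 (bits 0, 1 and 3) = 143
print(binary_multiply(13, 11))          # 143
print(binary_mod_multiply(13, 11, 7))   # 143 % 7 = 3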
14
1
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 2_5_0
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=1_0),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=1_0)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 1_0)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]), 1_0)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]), 1_1)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 1_1)

        self.assertEqual(len(stopping_criteria), 1)
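Outside the test harness, the same criteria plug straight into `generate`. A minimal sketch follows; the tiny model id is an assumption made for illustration.

# Sketch: StoppingCriteriaList with generate(); "sshleifer/tiny-gpt2" is an assumed checkpoint
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
inputs = tok("Hello", return_tensors="pt")
out = model.generate(
    **inputs,
    stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=10)]),
)
print(out.shape[-1])  # generation halts once the sequence reaches 10 tokens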
14
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class a_(a_):
    '''simple docstring'''

    __a: str = ['''vqvae''']

    def __init__(self, lowercase_, lowercase_, lowercase_, lowercase_) -> Tuple:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=lowercase_, scheduler=lowercase_, mel=lowercase_, vqvae=lowercase_)

    def _lowercase(self) -> int:
        '''simple docstring'''
        return 5_0 if isinstance(self.scheduler, lowercase_) else 1_0_0_0

    @torch.no_grad()
    def __call__(
        self,
        lowercase_ = 1,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = 0,
        lowercase_ = 0,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = 0,
        lowercase_ = 0,
        lowercase_ = None,
        lowercase_ = 0,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        lowerCAmelCase_ = steps or self.get_default_steps()
        self.scheduler.set_timesteps(lowercase_)
        lowerCAmelCase_ = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            lowerCAmelCase_ = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=lowercase_,
                device=self.device,
            )
        lowerCAmelCase_ = noise
        lowerCAmelCase_ = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(lowercase_, lowercase_)
            lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_)
            lowerCAmelCase_ = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
                (input_image.height, input_image.width)
            )
            lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
            lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_, 0)).latent_dist.sample(
                    generator=lowercase_
                )[0]
                lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                lowerCAmelCase_ = self.scheduler.add_noise(
                    lowercase_, lowercase_, self.scheduler.timesteps[start_step - 1]
                )

            lowerCAmelCase_ = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            lowerCAmelCase_ = int(mask_start_secs * pixels_per_second)
            lowerCAmelCase_ = int(mask_end_secs * pixels_per_second)
            lowerCAmelCase_ = self.scheduler.add_noise(
                lowercase_, lowercase_, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, lowercase_):
                lowerCAmelCase_ = self.unet(lowercase_, lowercase_, lowercase_)['sample']
            else:
                lowerCAmelCase_ = self.unet(lowercase_, lowercase_)['sample']

            if isinstance(self.scheduler, lowercase_):
                lowerCAmelCase_ = self.scheduler.step(
                    model_output=lowercase_,
                    timestep=lowercase_,
                    sample=lowercase_,
                    eta=lowercase_,
                    generator=lowercase_,
                )['prev_sample']
            else:
                lowerCAmelCase_ = self.scheduler.step(
                    model_output=lowercase_,
                    timestep=lowercase_,
                    sample=lowercase_,
                    generator=lowercase_,
                )['prev_sample']

            if mask is not None:
                if mask_start > 0:
                    lowerCAmelCase_ = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    lowerCAmelCase_ = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
            lowerCAmelCase_ = self.vqvae.decode(lowercase_)['sample']

        lowerCAmelCase_ = (images / 2 + 0.5).clamp(0, 1)
        lowerCAmelCase_ = images.cpu().permute(0, 2, 3, 1).numpy()
        lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8')
        lowerCAmelCase_ = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(lowercase_, mode='RGB').convert('L') for _ in images)
        )

        lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(
            **AudioPipelineOutput(np.array(lowercase_)[:, np.newaxis, :]), **ImagePipelineOutput(lowercase_)
        )

    @torch.no_grad()
    def _lowercase(self, lowercase_, lowercase_ = 5_0) -> np.ndarray:
        '''simple docstring'''
        assert isinstance(self.scheduler, lowercase_)
        self.scheduler.set_timesteps(lowercase_)
        lowerCAmelCase_ = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images]
        )
        lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
        lowerCAmelCase_ = torch.Tensor(lowercase_).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
            lowerCAmelCase_ = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            lowerCAmelCase_ = 1 - alpha_prod_t
            lowerCAmelCase_ = self.unet(lowercase_, lowercase_)['sample']
            lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def _lowercase(lowercase_, lowercase_, lowercase_) -> torch.Tensor:
        '''simple docstring'''
        lowerCAmelCase_ = acos(
            torch.dot(torch.flatten(lowercase_), torch.flatten(lowercase_)) / torch.norm(lowercase_) / torch.norm(lowercase_)
        )
        return sin((1 - alpha) * theta) * xa / sin(lowercase_) + sin(alpha * theta) * xa / sin(lowercase_)
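A minimal generation sketch for the audio diffusion pipeline above; the checkpoint id is an assumption, and the output attribute names follow the `BaseOutput` construction at the end of `__call__`.

# Sketch: unconditional audio generation; "teticio/audio-diffusion-256" is an assumed checkpoint
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
image = output.images[0]  # the generated mel spectrogram as a PIL image
audio = output.audios[0]  # the corresponding waveform reconstructed from the spectrogram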
14
1
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

lowerCamelCase_ = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
lowerCamelCase_ = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
lowerCamelCase_ = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

lowerCamelCase_ = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
    """facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase_ = {
    """facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
    """facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase_ = {
    """facebook/dpr-reader-single-nq-base""": 5_1_2,
    """facebook/dpr-reader-multiset-base""": 5_1_2,
}

lowerCamelCase_ = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase_ = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase_ = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


class a_(a_):
    '''simple docstring'''

    __a: Any = VOCAB_FILES_NAMES
    __a: List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __a: Any = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a: List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __a: Optional[Any] = DPRContextEncoderTokenizer


class a_(a_):
    '''simple docstring'''

    __a: int = VOCAB_FILES_NAMES
    __a: List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __a: Union[str, Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a: Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __a: List[str] = DPRQuestionEncoderTokenizer


lowerCamelCase_ = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

lowerCamelCase_ = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])

lowerCamelCase_ = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
            `None`, this will use the predefined model maximum length if a maximum length is required by one of the
            truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
            truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(a_)
class a_:
    '''simple docstring'''

    def __call__(
        self,
        lowercase_,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = False,
        lowercase_ = False,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        **lowercase_,
    ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                lowercase_, padding=lowercase_, truncation=lowercase_, max_length=lowercase_,
                return_tensors=lowercase_, return_attention_mask=lowercase_, **lowercase_,
            )
        elif titles is None or texts is None:
            lowerCAmelCase_ = titles if texts is None else texts
            return super().__call__(
                lowercase_, lowercase_, padding=lowercase_, truncation=lowercase_, max_length=lowercase_,
                return_tensors=lowercase_, return_attention_mask=lowercase_, **lowercase_,
            )
        lowerCAmelCase_ = titles if not isinstance(lowercase_, lowercase_) else [titles]
        lowerCAmelCase_ = texts if not isinstance(lowercase_, lowercase_) else [texts]
        lowerCAmelCase_ = len(lowercase_)
        lowerCAmelCase_ = questions if not isinstance(lowercase_, lowercase_) else [questions] * n_passages
        assert len(lowercase_) == len(
            lowercase_
        ), f'''There should be as many titles than texts but got {len(lowercase_)} titles and {len(lowercase_)} texts.'''
        lowerCAmelCase_ = super().__call__(lowercase_, lowercase_, padding=lowercase_, truncation=lowercase_)['input_ids']
        lowerCAmelCase_ = super().__call__(lowercase_, add_special_tokens=lowercase_, padding=lowercase_, truncation=lowercase_)['input_ids']
        lowerCAmelCase_ = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(lowercase_, lowercase_)
            ]
        }
        if return_attention_mask is not False:
            lowerCAmelCase_ = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            lowerCAmelCase_ = attention_mask
        return self.pad(lowercase_, padding=lowercase_, max_length=lowercase_, return_tensors=lowercase_)

    def _lowercase(
        self,
        lowercase_,
        lowercase_,
        lowercase_ = 1_6,
        lowercase_ = 6_4,
        lowercase_ = 4,
    ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        lowerCAmelCase_ = reader_input['input_ids']
        lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = reader_output[:3]
        lowerCAmelCase_ = len(lowercase_)
        lowerCAmelCase_ = sorted(range(lowercase_), reverse=lowercase_, key=relevance_logits.__getitem__)
        lowerCAmelCase_ = []
        for doc_id in sorted_docs:
            lowerCAmelCase_ = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            lowerCAmelCase_ = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                lowerCAmelCase_ = sequence_ids.index(self.pad_token_id)
            else:
                lowerCAmelCase_ = len(lowercase_)

            lowerCAmelCase_ = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=lowercase_,
                top_spans=lowercase_,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=lowercase_,
                        start_index=lowercase_,
                        end_index=lowercase_,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(lowercase_) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _lowercase(
        self,
        lowercase_,
        lowercase_,
        lowercase_,
        lowercase_,
    ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        lowerCAmelCase_ = []
        for start_index, start_score in enumerate(lowercase_):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        lowerCAmelCase_ = sorted(lowercase_, key=lambda lowercase_: x[1], reverse=lowercase_)
        lowerCAmelCase_ = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
            lowerCAmelCase_ = end_index - start_index + 1
            assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(lowercase_) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(a_)
class a_(a_, a_):
    '''simple docstring'''

    __a: Any = VOCAB_FILES_NAMES
    __a: Dict = READER_PRETRAINED_VOCAB_FILES_MAP
    __a: Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a: List[Any] = READER_PRETRAINED_INIT_CONFIGURATION
    __a: Tuple = ['''input_ids''', '''attention_mask''']
    __a: Any = DPRReaderTokenizer
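A short end-to-end sketch of the reader tokenizer API documented above: encode one question against two passages, run the reader, and decode the best answer spans. The upstream class names (`DPRReader`, `DPRReaderTokenizerFast`) are assumed, since the identifiers in this file are obfuscated.

# Sketch: question + passages in, best spans out (upstream names assumed)
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
inputs = tokenizer(
    questions="What is love?",
    titles=["Haddaway", "Charts"],
    texts=["'What Is Love' is a song recorded by Haddaway", "The song topped the charts in 1993"],
    padding=True,
    return_tensors="pt",
)
outputs = model(**inputs)
spans = tokenizer.decode_best_spans(inputs, outputs, num_spans=1)
print(spans[0].text, spans[0].relevance_score)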
14
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


lowerCamelCase_ = logging.get_logger(__name__)


def lowerCamelCase(a_, a_, a_, a_) -> Tuple[int, int]:
    def constraint_to_multiple_of(a_, a_, a_=0, a_=None):
        lowerCAmelCase_ = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            lowerCAmelCase_ = math.floor(val / multiple) * multiple

        if x < min_val:
            lowerCAmelCase_ = math.ceil(val / multiple) * multiple

        return x

    lowerCAmelCase_ = (output_size, output_size) if isinstance(a_, a_) else output_size

    lowerCAmelCase_, lowerCAmelCase_ = get_image_size(a_)
    lowerCAmelCase_, lowerCAmelCase_ = output_size

    # determine new height and width
    lowerCAmelCase_ = output_height / input_height
    lowerCAmelCase_ = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            lowerCAmelCase_ = scale_width
        else:
            # fit height
            lowerCAmelCase_ = scale_height

    lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height, multiple=a_)
    lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width, multiple=a_)

    return (new_height, new_width)


class a_(a_):
    '''simple docstring'''

    __a: Union[str, Any] = ['''pixel_values''']

    def __init__(
        self,
        lowercase_ = True,
        lowercase_ = None,
        lowercase_ = PILImageResampling.BILINEAR,
        lowercase_ = False,
        lowercase_ = 1,
        lowercase_ = True,
        lowercase_ = 1 / 2_5_5,
        lowercase_ = True,
        lowercase_ = None,
        lowercase_ = None,
        **lowercase_,
    ) -> None:
        '''simple docstring'''
        super().__init__(**lowercase_)
        lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
        lowerCAmelCase_ = get_size_dict(lowercase_)
        lowerCAmelCase_ = do_resize
        lowerCAmelCase_ = size
        lowerCAmelCase_ = keep_aspect_ratio
        lowerCAmelCase_ = ensure_multiple_of
        lowerCAmelCase_ = resample
        lowerCAmelCase_ = do_rescale
        lowerCAmelCase_ = rescale_factor
        lowerCAmelCase_ = do_normalize
        lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _lowercase(
        self,
        lowercase_,
        lowercase_,
        lowercase_ = False,
        lowercase_ = 1,
        lowercase_ = PILImageResampling.BICUBIC,
        lowercase_ = None,
        **lowercase_,
    ) -> np.ndarray:
        '''simple docstring'''
        lowerCAmelCase_ = get_size_dict(lowercase_)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        lowerCAmelCase_ = get_resize_output_image_size(
            lowercase_,
            output_size=(size['height'], size['width']),
            keep_aspect_ratio=lowercase_,
            multiple=lowercase_,
        )
        return resize(lowercase_, size=lowercase_, resample=lowercase_, data_format=lowercase_, **lowercase_)

    def _lowercase(
        self,
        lowercase_,
        lowercase_,
        lowercase_ = None,
        **lowercase_,
    ) -> Dict:
        '''simple docstring'''
        return rescale(lowercase_, scale=lowercase_, data_format=lowercase_, **lowercase_)

    def _lowercase(
        self,
        lowercase_,
        lowercase_,
        lowercase_,
        lowercase_ = None,
        **lowercase_,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(lowercase_, mean=lowercase_, std=lowercase_, data_format=lowercase_, **lowercase_)

    def _lowercase(
        self,
        lowercase_,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = None,
        lowercase_ = ChannelDimension.FIRST,
        **lowercase_,
    ) -> PIL.Image.Image:
        '''simple docstring'''
        lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ = size if size is not None else self.size
        lowerCAmelCase_ = get_size_dict(lowercase_)
        lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        lowerCAmelCase_ = resample if resample is not None else self.resample
        lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ = image_std if image_std is not None else self.image_std

        lowerCAmelCase_ = make_list_of_images(lowercase_)

        if not valid_images(lowercase_):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        lowerCAmelCase_ = [to_numpy_array(lowercase_) for image in images]

        if do_resize:
            lowerCAmelCase_ = [self.resize(image=lowercase_, size=lowercase_, resample=lowercase_) for image in images]

        if do_rescale:
            lowerCAmelCase_ = [self.rescale(image=lowercase_, scale=lowercase_) for image in images]

        if do_normalize:
            lowerCAmelCase_ = [self.normalize(image=lowercase_, mean=lowercase_, std=lowercase_) for image in images]

        lowerCAmelCase_ = [to_channel_dimension_format(lowercase_, lowercase_) for image in images]

        lowerCAmelCase_ = {'pixel_values': images}
        return BatchFeature(data=lowercase_, tensor_type=lowercase_)

    def _lowercase(self, lowercase_, lowercase_ = None) -> List[Any]:
        '''simple docstring'''
        lowerCAmelCase_ = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase_) != len(lowercase_):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits'
                )

            if is_torch_tensor(lowercase_):
                lowerCAmelCase_ = target_sizes.numpy()

            lowerCAmelCase_ = []

            for idx in range(len(lowercase_)):
                lowerCAmelCase_ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=lowercase_
                )
                lowerCAmelCase_ = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(lowercase_)
        else:
            lowerCAmelCase_ = logits.argmax(dim=1)
            lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
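A minimal preprocessing sketch for the image processor defined above. The upstream class name `DPTImageProcessor` is assumed; with the default 384x384 size, no aspect-ratio keeping, and `ensure_multiple_of=1`, a dummy frame comes out as a single 3x384x384 tensor.

# Sketch: preprocess one image (upstream class name assumed)
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 384, "width": 384})
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # dummy RGB frame
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])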
14
1
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    lowerCamelCase_ = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)

    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if args.log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()

    parser.add_argument(
        """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
    )
    parser.add_argument(
        """--dataset""",
        type=str,
        required=True,
        help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
    )
    parser.add_argument(
        """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
    )
    parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
    )
    parser.add_argument(
        """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
    )
    parser.add_argument(
        """--device""",
        type=int,
        default=None,
        help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
    )
    lowerCamelCase_ = parser.parse_args()

    main(args)
14
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
14
1
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
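To make the CER = (S + D + I) / N formula above concrete, here is a tiny by-hand check using jiwer directly. The space-separated strings stand in for character sequences, which is an assumption made to keep the example at the word-level jiwer API.

# "h e l o" vs "h e l l o" is one deletion over N=5 reference tokens -> 0.2
import jiwer

measures = jiwer.compute_measures(truth="h e l l o", hypothesis="h e l o")
incorrect = measures["substitutions"] + measures["deletions"] + measures["insertions"]
total = measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # 0.2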
14
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: every iteration either returns or refills the queue")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
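# NOTE: an added non-interactive sketch (illustrative; `TreeNode` and the
# traversal functions come from the module above, but this specific tree and
# the expected outputs are an assumption, not part of the original file).
# It builds the tree 1-(2,3)-(4,5,6,7) by hand instead of the interactive
# `build_tree`, so the traversals can be exercised without stdin.
def _demo_traversals() -> None:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    root.right.left, root.right.right = TreeNode(6), TreeNode(7)
    pre_order(root)   # expected: 1,2,4,5,3,6,7,
    print()
    in_order(root)    # expected: 4,2,5,1,6,3,7,
    print()
    post_order(root)  # expected: 4,5,2,6,7,3,1,
    print()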
14
1
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
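# NOTE: an added illustrative check (not part of the original module).
# Highest-Response-Ratio-Next picks the waiting process with the largest
# (waiting_time + burst_time) / burst_time. For example, at current_time = 10,
# a process that arrived at t=2 with a 4-unit burst has waited 8 units, so its
# ratio is (8 + 4) / 4 = 3.0.
def response_ratio(arrival: int, burst: int, now: int) -> float:
    return ((now - arrival) + burst) / burst


assert response_ratio(arrival=2, burst=4, now=10) == 3.0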
14
import base64


def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    return base64.b64decode(encoded_data).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
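# NOTE: an added round-trip sanity check (illustrative, not from the original):
# decoding the encoding must return the input for any text UTF-8 can represent.
for sample in ("", "Hello World!", "naïve café"):
    assert base64_decode(base64_encode(sample)) == sample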
14
1
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
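# NOTE: an added illustration (not part of the script above): the lazy-init
# layout this checker validates. `parse_init` parses both halves of an
# __init__.py shaped like the hypothetical sketch below and `analyze_results`
# requires the two halves to list the same objects per backend:
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#         ...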
14
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
14
1
14
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
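# NOTE: an added cross-check (illustrative, not from the original): the
# standard library implements the same Adler-32 checksum, so for identical
# byte input the two must agree.
import zlib

for text in ("", "Wikipedia"):
    assert adler32(text) == zlib.adler32(text.encode("utf-8"))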
14
1
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of a given number n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
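# NOTE: an added sanity check (illustrative): compare against a naive
# trial-division factorization for known Project Euler inputs.
def _largest_prime_factor_naive(n: int) -> int:
    factor, d = 1, 2
    while d * d <= n:
        while n % d == 0:
            factor, n = d, n // d
        d += 1
    return max(factor, n if n > 1 else 1)


for value in (13195, 600_851_475_143):
    assert solution(value) == _largest_prime_factor_naive(value)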
14
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SegformerConfig,
    SegformerForImageClassification,
    SegformerForSemanticSegmentation,
    SegformerImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor([
            [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
            [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
            [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
        ])
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor([
            [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
            [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
            [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
        ])
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor([
            [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
            [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
            [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
        ])
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor([
            [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
            [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
            [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
        ])
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor([
            [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
            [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
            [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
        ])
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor([
            [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
            [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
            [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
        ])
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
            [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
            [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
        ])
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor([
            [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
            [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
            [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
        ])
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor([
            [[-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01]],
            [[-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01]],
            [[7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01]],
        ])
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor([
            [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
            [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
            [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
        ])
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
            [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
            [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
        ])
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
            [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
            [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
        ])
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
            [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
            [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
        ])
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
            [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
            [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
        ])
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor([
            [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
            [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
            [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
        ])
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
14
1
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
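# NOTE: an added usage sketch (not part of the test file): the pattern these
# tests exercise is the standard DDPM denoising loop. `model` here stands in
# for any noise-prediction network (e.g. a diffusers UNet2DModel); treat the
# shapes and step count as assumptions for illustration only.
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)                                # predict noise
#         sample = scheduler.step(residual, t, sample).prev_sample   # x_t -> x_{t-1}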
14
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
14
1
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
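# NOTE: an added round-trip example (illustrative, using the variant Baconian
# alphabet defined above — unlike the classic cipher, this table gives
# distinct 5-letter codes to i/j and u/v, so decoding is unambiguous).
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("flee at once")) == "flee at once"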
14
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings, add_end_docstrings, add_start_docstrings,
    add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings,
)
from .generic import (
    ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType,
    add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels,
    flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor,
    is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor,
    reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose,
    working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME,
    EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError,
    cached_file, default_cache_path, define_sagemaker_information, download_url,
    extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name,
    has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache,
    send_example_telemetry, try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION,
    USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule,
    ccl_version, direct_transformers_import, get_torch_version,
    is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bs4_available,
    is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available,
    is_detectron2_available, is_faiss_available, is_flax_available, is_ftfy_available,
    is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available,
    is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available,
    is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available,
    is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available,
    is_psutil_available, is_pyanvml_available, is_pyctcdecode_available,
    is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available,
    is_rjieba_available, is_sacremoses_available, is_safetensors_available,
    is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available,
    is_sentencepiece_available, is_seqio_available, is_sklearn_available,
    is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available,
    is_tensorflow_probability_available, is_tensorflow_text_available, is_tf2onnx_available,
    is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available,
    is_torch_bf16_available, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available,
    is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available,
    is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available,
    is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available,
    is_torchvision_available, is_training_run_on_sagemaker, is_vision_available,
    requires_backends, torch_only_method,
)


WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.

DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
14
1
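# Usage sketch for the Baconian-cipher helpers in the first code field of the
# row above. Illustrative addition, not part of the dataset row; it assumes the
# restored names `encode`/`decode` from the cleaned-up version of that file.
# Since encode_dict is a bijection on lowercase letters and space, a round trip
# must reproduce the (lowercase) plaintext.
message = "flee at once"
ciphertext = encode(message)  # five A/B symbols per letter, word gaps kept
assert decode(ciphertext) == message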
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
14
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCamelCase_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCamelCase ( a_ ) -> List[str]: if isinstance(a_ , torch.Tensor ): return image elif isinstance(a_ , PIL.Image.Image ): lowerCAmelCase_ = [image] lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image] lowerCAmelCase_ = torch.stack(a_ ) return image class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ ) lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 ) lowerCAmelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple: '''simple docstring''' if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' ) lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase_ = init_latents.shape lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print('add noise to latents at timestep' , lowercase_ ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) lowerCAmelCase_ = init_latents return latents @torch.no_grad() def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(lowercase_ ) # 2. Preprocess image lowerCAmelCase_ = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device ) lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ ) # 4. Prepare latent variables lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) lowerCAmelCase_ = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
14
1
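# Usage sketch for the composite config in the row above. Illustrative addition,
# not part of the dataset row; it assumes the public `transformers` names
# (`BertConfig`, `EncoderDecoderConfig`) rather than the obfuscated ones, and
# requires transformers to be installed.
from transformers import BertConfig, EncoderDecoderConfig

encoder_cfg = BertConfig()
decoder_cfg = BertConfig()  # gets is_decoder=True and cross-attention enabled
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
assert config.decoder.is_decoder and config.decoder.add_cross_attention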
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) lowerCamelCase_ = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def lowerCamelCase ( a_ ) -> Optional[Any]: lowerCAmelCase_ = {} state_dict.pop('pixel_mean' , a_ ) state_dict.pop('pixel_std' , a_ ) lowerCAmelCase_ = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCAmelCase_ = key.replace(a_ , a_ ) if re.match(a_ , a_ ): lowerCAmelCase_ = int(re.match(a_ , a_ ).group(2 ) ) if layer_nb == 0: lowerCAmelCase_ = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: lowerCAmelCase_ = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: lowerCAmelCase_ = key.replace('layers.2' , 'proj_out' ) lowerCAmelCase_ = value lowerCAmelCase_ = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def lowerCamelCase ( a_ , a_ , a_ , a_="ybelkada/segment-anything" ) -> Union[str, Any]: lowerCAmelCase_ = hf_hub_download(a_ , F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: lowerCAmelCase_ = SamConfig() elif "sam_vit_l" in model_name: lowerCAmelCase_ = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCAmelCase_ = SamConfig( vision_config=a_ , ) elif "sam_vit_h" in model_name: lowerCAmelCase_ = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCAmelCase_ = SamConfig( vision_config=a_ , ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' ) lowerCAmelCase_ = replace_keys(a_ ) lowerCAmelCase_ = SamImageProcessor() lowerCAmelCase_ = SamProcessor(image_processor=a_ ) lowerCAmelCase_ = SamModel(a_ ) hf_model.load_state_dict(a_ ) lowerCAmelCase_ = hf_model.to('cuda' ) lowerCAmelCase_ = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' ) lowerCAmelCase_ = [[[400, 650]]] lowerCAmelCase_ = [[1]] lowerCAmelCase_ = processor(images=np.array(a_ ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase_ = hf_model(**a_ ) 
lowerCAmelCase_ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 lowerCAmelCase_ = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase_ = hf_model(**a_ ) lowerCAmelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 lowerCAmelCase_ = ((75, 275, 1_725, 850),) lowerCAmelCase_ = processor(images=np.array(a_ ) , input_boxes=a_ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase_ = hf_model(**a_ ) lowerCAmelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. lowerCAmelCase_ = [[[400, 650], [800, 650]]] lowerCAmelCase_ = [[1, 1]] lowerCAmelCase_ = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase_ = hf_model(**a_ ) lowerCAmelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() lowerCamelCase_ = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) lowerCamelCase_ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
14
def lowerCamelCase ( a_ ) -> "list[int]": if upper_limit < 0: raise ValueError('Limit for the Catalan sequence must be ≥ 0' ) lowerCAmelCase_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowerCAmelCase_ = 1 if upper_limit > 0: lowerCAmelCase_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(a_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowerCamelCase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
1
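# Usage sketch for `catalan_numbers` in the second code field of the row above
# (names as restored). Illustrative addition, not part of the dataset row:
# C(0)..C(5) = 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]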
lowerCamelCase_ = { 0: """0""", 1: """1""", 2: """2""", 3: """3""", 4: """4""", 5: """5""", 6: """6""", 7: """7""", 8: """8""", 9: """9""", 1_0: """a""", 1_1: """b""", 1_2: """c""", 1_3: """d""", 1_4: """e""", 1_5: """f""", } def lowerCamelCase ( a_ ) -> str: assert type(a_ ) in (int, float) and decimal == int(a_ ) lowerCAmelCase_ = int(a_ ) lowerCAmelCase_ = '' lowerCAmelCase_ = False if decimal < 0: lowerCAmelCase_ = True decimal *= -1 while decimal > 0: lowerCAmelCase_ , lowerCAmelCase_ = divmod(a_ , 16 ) lowerCAmelCase_ = values[remainder] + hexadecimal lowerCAmelCase_ = '0x' + hexadecimal if negative: lowerCAmelCase_ = '-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
14
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = {}, {} if padding is not None: lowerCAmelCase_ = padding if truncation is not None: lowerCAmelCase_ = truncation if top_k is not None: lowerCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int: '''simple docstring''' if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): lowerCAmelCase_ = {'image': image, 'question': question} else: lowerCAmelCase_ = image lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ ) return results def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = load_image(inputs['image'] ) lowerCAmelCase_ = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def _lowercase ( self , lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.model(**lowercase_ ) return model_outputs def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any: '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.sigmoid()[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
14
1
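# Worked example for `decimal_to_hexadecimal` in the first code field of the
# row above (names as restored). Illustrative addition, not part of the dataset
# row. The divmod loop peels one base-16 digit per pass, least significant
# first: 255 -> divmod(255, 16) = (15, 15) -> 'f', then (0, 15) -> 'ff'.
assert decimal_to_hexadecimal(255) == "0xff"
assert decimal_to_hexadecimal(-256) == "-0x100"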
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowerCamelCase_ = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } lowerCamelCase_ = { """facebook/blenderbot_small-90M""": 5_1_2, } class a_ ( a_ ): '''simple docstring''' __a: Optional[int] = VOCAB_FILES_NAMES __a: List[str] = PRETRAINED_VOCAB_FILES_MAP __a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a: List[str] = BlenderbotSmallTokenizer def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Any: '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=lowercase_ , merges=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , ) , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = add_prefix_space def _lowercase ( self , lowercase_ , lowercase_=None ) -> int: '''simple docstring''' lowerCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]: '''simple docstring''' lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
14
def check_cycle(graph: dict) -> bool:
    # Keep track of visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
14
1
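# Usage sketch for `check_cycle` in the second code field of the row above
# (names as restored). Illustrative addition, not part of the dataset row: a
# cycle is reported when DFS meets a vertex still on the recursion stack.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert check_cycle({0: [1], 1: [2], 2: []}) is False   # a simple chain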
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowerCamelCase ( a_ , a_ ) -> Optional[Any]: lowerCAmelCase_ = [] for part_id in partition_order: lowerCAmelCase_ = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(a_ ): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Dict: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(100 ).repartition(1 ) lowerCAmelCase_ = Spark(a_ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(10 ).repartition(2 ) lowerCAmelCase_ = [1, 0] lowerCAmelCase_ = _generate_iterable_examples(a_ , a_ ) # Reverse the partitions. lowerCAmelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , a_ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowerCAmelCase_ , lowerCAmelCase_ = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Tuple: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(10 ).repartition(1 ) lowerCAmelCase_ = SparkExamplesIterable(a_ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(a_ ): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('numpy.random.Generator' ) as generator_mock: lowerCAmelCase_ = lambda a_ : x.reverse() lowerCAmelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , [2, 1, 0] ) lowerCAmelCase_ = SparkExamplesIterable(a_ ).shuffle_data_sources(a_ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Union[str, Any]: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowerCAmelCase_ = SparkExamplesIterable(a_ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , [0, 2] ) for i, (row_id, row_dict) in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowerCAmelCase_ = SparkExamplesIterable(a_ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , [1, 3] ) for i, (row_id, row_dict) in enumerate(a_ ): lowerCAmelCase_ , lowerCAmelCase_ = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase ( ) -> Tuple: lowerCAmelCase_ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase_ = spark.range(100 ).repartition(1 ) lowerCAmelCase_ = Spark(a_ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
14
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: int = StableDiffusionInpaintPipeline __a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a: int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a: List[str] = frozenset([] ) def _lowercase ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) lowerCAmelCase_ = CLIPTextModel(lowercase_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int: '''simple docstring''' lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) ) lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) ) if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ ) lowerCAmelCase_ = 
sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = sd_pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' ) lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ) lowerCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
14
1
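# Worked example: why the Spark repartition test in the first code field of the
# row above expects 50 partitions. Illustrative addition, not part of the
# dataset row. Each int64 id row is 8 bytes, so with max_shard_size=16 a shard
# holds 2 rows, and 100 rows need 50 shards.
import math

row_bytes, max_shard_size, num_rows = 8, 16, 100
rows_per_shard = max_shard_size // row_bytes  # 2
assert math.ceil(num_rows / rows_per_shard) == 50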
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
1
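# Usage sketch for the 0-1 BFS graph in the second code field of the row above
# (class and method names as restored). Illustrative addition, not part of the
# dataset row. Weight-0 edges go to the front of the deque and weight-1 edges
# to the back, so the deque stays ordered by distance without a priority queue.
g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 2) == 1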
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
14
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
1
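# Usage sketch for `radix_sort` in the second code field of the row above
# (names as restored). Illustrative addition, not part of the dataset row: the
# sort handles one decimal digit per pass, least significant first, so k passes
# suffice for k-digit keys.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]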
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]: # load base model lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowerCAmelCase_ = load_file(a_ ) lowerCAmelCase_ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.text_encoder else: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.unet # find the target layer lowerCAmelCase_ = layer_infos.pop(0 ) while len(a_ ) > -1: try: lowerCAmelCase_ = curr_layer.__getattr__(a_ ) if len(a_ ) > 0: lowerCAmelCase_ = layer_infos.pop(0 ) elif len(a_ ) == 0: break except Exception: if len(a_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowerCAmelCase_ = layer_infos.pop(0 ) lowerCAmelCase_ = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(a_ ) else: pair_keys.append(a_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 ) else: lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ) # update visited list for item in pair_keys: visited.append(a_ ) return pipeline if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = args.base_model_path lowerCamelCase_ = args.checkpoint_path lowerCamelCase_ = args.dump_path lowerCamelCase_ = args.lora_prefix_unet lowerCamelCase_ = args.lora_prefix_text_encoder lowerCamelCase_ = args.alpha lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCamelCase_ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
14
1
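# Usage sketch for `partition` in the first code field of the row above (names
# as restored). Illustrative addition, not part of the dataset row: the table
# counts partitions of n restricted by maximum part size, so partition(m)
# returns p(m); p(1..7) = 1, 2, 3, 5, 7, 11, 15.
assert partition(7) == 15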
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self , lowercase_ ) -> Union[str, Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowerCAmelCase_ = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = 'sgugger/tiny-distilbert-classification' lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ ) # set architectures equal to `None` lowerCAmelCase_ = None lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' 
lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tinier_bart' lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tinier_bart' lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , 
batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase_ , 'env.csv' ) , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_ , 'env.csv' ) ).exists() ) def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ ): self.assertTrue(hasattr(lowercase_ , 'sequential' ) ) self.assertTrue(hasattr(lowercase_ , 'cumulative' ) ) self.assertTrue(hasattr(lowercase_ , 'current' ) ) self.assertTrue(hasattr(lowercase_ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , 'log.txt' ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , ) lowerCAmelCase_ = PyTorchBenchmark(lowercase_ ) lowerCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase_ , 'log.txt' ) ).exists() )
14
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase ( a_ ) -> Any: lowerCAmelCase_ = tmp_path / 'file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = tmp_path / 'malformed_file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ , a_ ) -> List[str]: lowerCAmelCase_ = tmp_path / 'csv_with_image.csv' lowerCAmelCase_ = textwrap.dedent( F'''\ image {image_file} ''' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> int: lowerCAmelCase_ = tmp_path / 'csv_with_label.csv' lowerCAmelCase_ = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv' lowerCAmelCase_ = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]: lowerCAmelCase_ = Csv() lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(a_ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(a_ ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase ( a_ ) -> Optional[Any]: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() lowerCAmelCase_ = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase ( a_ ) -> int: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1:] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() lowerCAmelCase_ = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(a_ ) for label in labels] def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(a_ ) for i in x.split()]} ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) lowerCAmelCase_ = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
14
1
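# Usage sketch mirroring the benchmark tests in the first code field of the row
# above. Illustrative addition, not part of the dataset row; it assumes the
# (now-deprecated) public `transformers` benchmark API exercised by those tests
# and requires torch to be installed.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()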
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class a_ ( a_ ): '''simple docstring''' __a: List[Any] = '''blip_2_vision_model''' def __init__( self , lowercase_=1_4_0_8 , lowercase_=6_1_4_4 , lowercase_=3_9 , lowercase_=1_6 , lowercase_=2_2_4 , lowercase_=1_4 , lowercase_="gelu" , lowercase_=0.0_00_01 , lowercase_=0.0 , lowercase_=1e-10 , lowercase_=True , **lowercase_ , ) -> int: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = patch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = hidden_act lowerCAmelCase_ = qkv_bias @classmethod def _lowercase ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(lowercase_ , **lowercase_ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": lowerCAmelCase_ = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowercase_ , **lowercase_ ) class a_ ( a_ ): '''simple docstring''' __a: Union[str, Any] = '''blip_2_qformer''' def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=2 , lowercase_=1_4_0_8 , **lowercase_ , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=lowercase_ , **lowercase_ ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_act lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = position_embedding_type lowerCAmelCase_ = cross_attention_frequency lowerCAmelCase_ = encoder_hidden_size @classmethod def _lowercase ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(lowercase_ , **lowercase_ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": lowerCAmelCase_ = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowercase_ , **lowercase_ ) class a_ ( a_ ): '''simple docstring''' __a: Tuple = '''blip-2''' __a: Any = True def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=3_2 , **lowercase_ ) -> List[Any]: '''simple docstring''' super().__init__(**lowercase_ ) if vision_config is None: lowerCAmelCase_ = {} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: lowerCAmelCase_ = {} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: lowerCAmelCase_ = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) lowerCAmelCase_ = BlipaVisionConfig(**lowercase_ ) lowerCAmelCase_ = BlipaQFormerConfig(**lowercase_ ) lowerCAmelCase_ = text_config['model_type'] if 'model_type' in text_config else 'opt' lowerCAmelCase_ = CONFIG_MAPPING[text_model_type](**lowercase_ ) lowerCAmelCase_ = self.text_config.tie_word_embeddings lowerCAmelCase_ = self.text_config.is_encoder_decoder lowerCAmelCase_ = num_query_tokens lowerCAmelCase_ = self.vision_config.hidden_size lowerCAmelCase_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCAmelCase_ = 1.0 lowerCAmelCase_ = 0.02 @classmethod def _lowercase ( cls , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , ) -> Optional[Any]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , ) def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.vision_config.to_dict() lowerCAmelCase_ = self.qformer_config.to_dict() lowerCAmelCase_ = self.text_config.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
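# Usage sketch: composing a full BLIP-2 config from the three sub-configs, assuming the
# classes above are exported under their public transformers names (Blip2VisionConfig,
# Blip2QFormerConfig, Blip2Config) and an OPT text model, as in the default.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

blip2_config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
assert blip2_config.num_query_tokens == 32  # default from the __init__ above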
14
from maths.prime_factors import prime_factors


def lowerCamelCase ( number ) -> int:
    if not isinstance(number , int ):
        lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(lowerCAmelCase_ )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
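# This is the Liouville function lambda(n): 1 when n has an even number of prime factors,
# -1 otherwise. Illustrative values, assuming prime_factors counts factors with multiplicity:
# lambda(1) = 1 (no factors), lambda(2) = -1, lambda(4) = 1 (2 * 2), lambda(12) = -1 (2 * 2 * 3)
assert [lowerCamelCase(n) for n in (1, 2, 4, 12)] == [1, -1, 1, -1]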
14
1
MOD_ADLER = 6_5_5_2_1


def lowerCamelCase ( plain_text ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
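# Sanity check against the standard library: for ASCII input the pure-Python version above
# agrees with zlib, and 'Wikipedia' is the classic Adler-32 reference vector (0x11E60398).
import zlib

assert lowerCamelCase('Wikipedia') == zlib.adler32(b'Wikipedia') == 0x11E60398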
14
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): 
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' ) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' , a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' super().__init__(*lowercase_ , **lowercase_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _lowercase ( self , lowercase_=None ) -> int: '''simple docstring''' lowerCAmelCase_ = {} if top_k is not None: lowerCAmelCase_ = top_k return {}, {}, postprocess_params def __call__( self , lowercase_ , **lowercase_ ) -> Dict: '''simple docstring''' return super().__call__(lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image(lowercase_ ) lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework ) return model_inputs def _lowercase ( self , lowercase_ ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = self.model(**lowercase_ ) return model_outputs def _lowercase ( self , lowercase_ , lowercase_=5 ) -> int: '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.softmax(-1 )[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ ) elif self.framework == "tf": lowerCAmelCase_ = stable_softmax(model_outputs.logits , axis=-1 )[0] lowerCAmelCase_ = tf.math.top_k(lowercase_ , k=lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
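# Usage sketch for the image-classification pipeline above; the checkpoint and image URL
# are illustrative choices, not requirements of the class.
from transformers import pipeline

classifier = pipeline('image-classification', model='google/vit-base-patch16-224')
preds = classifier(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg',
    top_k=3,
)
# preds is a list of {'score': float, 'label': str} dicts, highest score first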
14
def binary_multiply ( a , b ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply ( a , b , c ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
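# Quick illustrative checks for the two doubling helpers (plain and modular):
assert binary_multiply(2, 9) == 18
assert binary_mod_multiply(2, 9, 7) == 18 % 7  # == 4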
14
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """microsoft/swinv2-tiny-patch4-window8-256""": ( """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json""" ), } class a_ ( a_ ): '''simple docstring''' __a: List[str] = '''swinv2''' __a: Any = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , lowercase_=2_2_4 , lowercase_=4 , lowercase_=3 , lowercase_=9_6 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 1_2, 2_4] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=3_2 , **lowercase_ , ) -> List[Any]: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = depths lowerCAmelCase_ = len(lowercase_ ) lowerCAmelCase_ = num_heads lowerCAmelCase_ = window_size lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = hidden_act lowerCAmelCase_ = use_absolute_embeddings lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = initializer_range lowerCAmelCase_ = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) lowerCAmelCase_ = (0, 0, 0, 0)
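# Sketch of the derived channel dimension, assuming the public name Swinv2Config:
from transformers import Swinv2Config

swin_config = Swinv2Config()  # defaults above: embed_dim=96, depths=[2, 2, 6, 2]
assert swin_config.hidden_size == 96 * 2 ** (4 - 1)  # 768, set at the end of __init__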
14
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class a_ ( a_ ): '''simple docstring''' __a: str = ['''vqvae'''] def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' lowerCAmelCase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase_ , device=self.device , ) lowerCAmelCase_ = noise lowerCAmelCase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_ , lowercase_ ) lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ ) lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample( generator=lowercase_ )[0] lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ = int(mask_start_secs * pixels_per_second ) lowerCAmelCase_ = int(mask_end_secs * pixels_per_second ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase_ ): lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample'] else: lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] if isinstance(self.scheduler , lowercase_ ): lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample'] else: lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , 
)['prev_sample'] if mask is not None: if mask_start > 0: lowerCAmelCase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample'] lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' ) lowerCAmelCase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) ) lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ = 1 - alpha_prod_t lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor: '''simple docstring''' lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
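# Usage sketch, assuming the pipeline above is exported as AudioDiffusionPipeline and
# that a compatible checkpoint exists under the (assumed) name below.
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained('teticio/audio-diffusion-256')
output = pipe(generator=torch.Generator().manual_seed(42))
image = output.images[0]  # mel spectrogram rendered as a PIL image
audio = output.audios[0]  # decoded waveform
sample_rate = pipe.mel.get_sample_rate()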
14
1
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


lowerCamelCase_ = logging.get_logger(__name__)


class a_ ( PoolFormerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *lowercase_ , **lowercase_ ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*lowercase_ , **lowercase_ )
14
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]: def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ): lowerCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase_ = math.ceil(val / multiple ) * multiple return x lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = output_size # determine new height and width lowerCAmelCase_ = output_height / input_height lowerCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase_ = scale_width else: # fit height lowerCAmelCase_ = scale_height lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ ) lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ ) return (new_height, new_width) class a_ ( a_ ): '''simple docstring''' __a: Union[str, Any] = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowerCAmelCase_ = get_resize_output_image_size( lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict: '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: '''simple docstring''' lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowerCAmelCase_ = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowercase_ ): lowerCAmelCase_ = target_sizes.numpy() lowerCAmelCase_ = [] for idx in range(len(lowercase_ ) ): lowerCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ ) lowerCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowerCAmelCase_ = logits.argmax(dim=1 ) lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
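# Standalone sketch of the inner rounding rule used by the resize path above
# (mirrors constraint_to_multiple_of: round to the nearest multiple, with bounds):
import math


def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


assert constraint_to_multiple_of(250, 32) == 256
assert constraint_to_multiple_of(250, 32, max_val=250) == 224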
14
1
import datasets from .evaluate import evaluate lowerCamelCase_ = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ lowerCamelCase_ = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ lowerCamelCase_ = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): '''simple docstring''' def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )}, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def _lowercase ( self , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = {prediction['id']: prediction['prediction_text'] for prediction in predictions} lowerCAmelCase_ = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] lowerCAmelCase_ = evaluate(dataset=lowercase_ , predictions=lowercase_ ) return score
14
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


lowerCamelCase_ = logging.get_logger(__name__)


class a_ ( PoolFormerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *lowercase_ , **lowercase_ ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*lowercase_ , **lowercase_ )
14
1
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", }, """tokenizer_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""", }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase_ = { """t5-small""": 5_1_2, """t5-base""": 5_1_2, """t5-large""": 5_1_2, """t5-3b""": 5_1_2, """t5-11b""": 5_1_2, } class a_ ( a_ ): '''simple docstring''' __a: List[str] = VOCAB_FILES_NAMES __a: Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a: Dict = ['''input_ids''', '''attention_mask'''] __a: List[Any] = TaTokenizer __a: List[int] = [] def __init__( self , lowercase_=None , lowercase_=None , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_=1_0_0 , lowercase_=None , **lowercase_ , ) -> Optional[Any]: '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowerCAmelCase_ = [f'''<extra_id_{i}>''' for i in range(lowercase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowerCAmelCase_ = len(set(filter(lambda lowercase_ : bool('extra_id_' in str(lowercase_ ) ) , lowercase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = vocab_file lowerCAmelCase_ = False if not self.vocab_file else True lowerCAmelCase_ = extra_ids @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowerCAmelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase_ , ) return max_model_length def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowercase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase_ = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) logger.info(f'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]: '''simple docstring''' lowerCAmelCase_ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowerCAmelCase_ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]: '''simple docstring''' lowerCAmelCase_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return list( set(filter(lambda lowercase_ : bool(re.search(R'<extra_id_\d+>' , lowercase_ ) ) is not None , self.additional_special_tokens ) ) ) def _lowercase ( self ) -> List[str]: '''simple docstring''' return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
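# Usage sketch, assuming the public name T5TokenizerFast and the t5-small checkpoint:
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained('t5-small')
ids = tok('Hello world').input_ids
assert ids[-1] == tok.eos_token_id  # </s> is appended, as in the build methods above
assert tok.convert_tokens_to_ids('<extra_id_0>') == 32099  # sentinels sit at the top of the vocab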
14
from __future__ import annotations

import queue


class TreeNode:
    '''simple docstring'''

    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise


def pre_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=',' )
    pre_order(node.left )
    pre_order(node.right )


def in_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=',' )
    in_order(node.right )


def post_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=',' )


def level_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )


def level_order_actual(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )


def pre_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right


def post_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )


def prompt(s: str = "" , width=50 , char="*" ) -> str:
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return F'''{left * char} {s} {(left + extra) * char}'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
    print(prompt("""Pre Order Traversal"""))
    pre_order(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal"""))
    in_order(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal"""))
    post_order(node)
    print(prompt() + """\n""")
    print(prompt("""Level Order Traversal"""))
    level_order(node)
    print(prompt() + """\n""")
    print(prompt("""Actual Level Order Traversal"""))
    level_order_actual(node)
    print("""*""" * 5_0 + """\n""")
    print(prompt("""Pre Order Traversal - Iteration Version"""))
    pre_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal - Iteration Version"""))
    in_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal - Iteration Version"""))
    post_order_iter(node)
    print(prompt())
14
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class a_ ( a_ ): '''simple docstring''' __a: List[str] = '''luke''' def __init__( self , lowercase_=5_0_2_6_7 , lowercase_=5_0_0_0_0_0 , lowercase_=7_6_8 , lowercase_=2_5_6 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=None , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = entity_vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = entity_emb_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_act lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = use_entity_aware_attention lowerCAmelCase_ = classifier_dropout
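# Sketch of the defaults, assuming the public name LukeConfig:
from transformers import LukeConfig

luke_config = LukeConfig()
assert luke_config.entity_vocab_size == 500000  # separate entity vocabulary
assert luke_config.entity_emb_size == 256  # entity embeddings are smaller than hidden_size
assert luke_config.use_entity_aware_attention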
14
import base64


def baseaa_encode ( string ) -> bytes:
    return base64.b85encode(string.encode('utf-8' ) )


def baseaa_decode ( a_ ) -> str:
    return base64.b85decode(a_ ).decode('utf-8' )


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
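# Round-trip property of the base85 pair above (holds for any UTF-8 text):
assert baseaa_decode(baseaa_encode('some string')) == 'some string'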
14
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a_ ( a_ ): '''simple docstring''' __a: Optional[Any] = ['''image_processor''', '''tokenizer'''] __a: List[str] = '''LayoutLMv3ImageProcessor''' __a: str = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> int: '''simple docstring''' lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowercase_ , ) lowerCAmelCase_ = kwargs.pop('feature_extractor' ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_ , lowercase_ ) def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding: '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_ ): lowerCAmelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase_ = features['words'] lowerCAmelCase_ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values lowerCAmelCase_ = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowerCAmelCase_ = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] ) lowerCAmelCase_ = images return encoded_inputs def _lowercase ( self , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f''' {len(lowercase_ )} and {len(lowercase_ )}''' ) return images_with_overflow def _lowercase ( self , *lowercase_ , **lowercase_ ) -> List[str]: '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def _lowercase ( self , *lowercase_ , **lowercase_ ) -> Optional[int]: '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def _lowercase ( self ) -> Dict: '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _lowercase ( self ) -> str: '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , ) return self.image_processor_class @property def _lowercase ( self ) -> int: '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , ) return self.image_processor
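# Usage sketch; the checkpoint is the standard one, the image path is a placeholder,
# and OCR mode (apply_ocr=True by default) additionally requires pytesseract.
from PIL import Image
from transformers import LayoutLMv3Processor

layout_processor = LayoutLMv3Processor.from_pretrained('microsoft/layoutlmv3-base')
document_image = Image.open('document.png').convert('RGB')
encoding = layout_processor(document_image, return_tensors='pt')
assert set(encoding.keys()) >= {'input_ids', 'bbox', 'attention_mask', 'pixel_values'}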
14
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel


def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int:
    if attention_mask is None:
        lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class a_ :
    '''simple docstring'''

    __a: Tuple = OPTConfig
    __a: Optional[Any] = {}
    __a: Tuple = '''gelu'''

    def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
        '''simple docstring'''
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = seq_length
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = eos_token_id
        lowerCAmelCase_ = pad_token_id
        lowerCAmelCase_ = bos_token_id
        lowerCAmelCase_ = embed_dim
        lowerCAmelCase_ = word_embed_proj_dim
        lowerCAmelCase_ = False

    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowerCAmelCase_ = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
        lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
        return config, inputs_dict

    def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
        '''simple docstring'''
        lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
        lowerCAmelCase_ = inputs_dict['input_ids']
        lowerCAmelCase_ = input_ids[:1, :]
        lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
        lowerCAmelCase_ = 1

        # first forward pass
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )

        lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
        lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
        lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )


@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
    '''simple docstring'''

    __a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    __a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
    __a: Union[str, Any] = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    __a: int = False
    __a: List[Any] = False
    __a: Dict = False
    __a: List[Any] = 1_0

    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        lowerCAmelCase_ = TFOPTModelTester(self )
        lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )

    def _lowercase ( self ) -> List[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )

    def _lowercase ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(lowercase_ , lowercase_ ):
            if hasattr(lowercase_ , 'weight' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(lowercase_ , 'weight' ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
                # build the embeddings
                lowerCAmelCase_ = model_class(config=lowercase_ )
                lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
                lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )

                # reshape the embeddings
                model.resize_token_embeddings(lowercase_ )
                lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
                lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )

                # check that the resized embeddings size matches the desired size.
                lowerCAmelCase_ = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )

                # check that weights remain the same after resizing
                lowerCAmelCase_ = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        lowerCAmelCase_ = False
                self.assertTrue(lowercase_ )

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )

                    lowerCAmelCase_ = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            lowerCAmelCase_ = False
                    self.assertTrue(lowercase_ )


def lowerCamelCase ( a_ ) -> Any:
    return tf.constant(a_ , dtype=tf.intaa )


@require_tf
class a_ ( unittest.TestCase ):
    '''simple docstring'''

    __a: Optional[int] = 9_9

    def _lowercase ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        lowerCAmelCase_ = input_ids.shape[0]
        lowerCAmelCase_ = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def _lowercase ( self ) -> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
        lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
        with tf.GradientTape():
            lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
        lowerCAmelCase_ = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape , lowercase_ )
        lowerCAmelCase_ = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )

        lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
        lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )


@require_tf
@slow
class a_ ( unittest.TestCase ):
    '''simple docstring'''

    def _lowercase ( self ) -> Optional[int]:
        '''simple docstring'''
        super().setUp()
        lowerCAmelCase_ = 'facebook/opt-350m'

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
        lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )

        lowerCAmelCase_ = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
        lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        lowerCAmelCase_ = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ] )
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )

        lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
        lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )


@require_tf
@slow
class a_ ( unittest.TestCase ):
    '''simple docstring'''

    @property
    def _lowercase ( self ) -> List[str]:
        '''simple docstring'''
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def _lowercase ( self ) -> str:
        '''simple docstring'''
        lowerCAmelCase_ = 'facebook/opt-125m'

        lowerCAmelCase_ = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        lowerCAmelCase_ = []
        lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
        lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )

        for prompt in self.prompts:
            lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
            lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
            lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            predicted_outputs += generated_string

        self.assertListEqual(lowercase_ , lowercase_ )

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowerCAmelCase_ = 'facebook/opt-350m'
        lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
        lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )

        lowerCAmelCase_ = 'left'

        # use different length sentences to test batching
        lowerCAmelCase_ = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
        lowerCAmelCase_ = inputs['input_ids']

        lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )

        lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        lowerCAmelCase_ = model.generate(input_ids=lowercase_ )

        lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )

        lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )

        lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
        lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
        lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )

        lowerCAmelCase_ = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(lowercase_ , lowercase_ )
        self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )

    def _lowercase ( self ) -> Dict:
        '''simple docstring'''
        lowerCAmelCase_ = 'facebook/opt-350m'

        lowerCAmelCase_ = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        lowerCAmelCase_ = []
        lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
        lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )

        for prompt in self.prompts:
            lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
            lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
            lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            predicted_outputs += generated_string

        self.assertListEqual(lowercase_ , lowercase_ )
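
# A minimal, runnable sketch of the mask-building idiom used by the helper at
# the top of this file: positions equal to the pad id get 0, real tokens get 1.
# The pad id of 1 and the tiny batch are assumptions for illustration, and the
# dtype is written with its standard TensorFlow name (tf.int8) rather than the
# dataset's rewritten identifier.
import tensorflow as tf

example_ids = tf.constant([[0, 31414, 232, 1, 1]])  # hypothetical batch, pad_token_id = 1
example_mask = tf.cast(tf.math.not_equal(example_ids, 1), tf.int8)
print(example_mask.numpy())  # [[1 1 1 0 0]]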
14
1
from __future__ import annotations

from scipy.special import comb  # type: ignore


class a_ :
    '''simple docstring'''

    def __init__( self , lowercase_ ) -> Any:
        '''simple docstring'''
        lowerCAmelCase_ = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        lowerCAmelCase_ = len(lowercase_ ) - 1

    def _lowercase ( self , lowercase_ ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        lowerCAmelCase_ = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , lowercase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(lowercase_ ) , 5 ) == 1
        return output_values

    def _lowercase ( self , lowercase_ ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        lowerCAmelCase_ = self.basis_function(lowercase_ )
        lowerCAmelCase_ = 0.0
        lowerCAmelCase_ = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def _lowercase ( self , lowercase_ = 0.01 ) -> Any:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore

        lowerCAmelCase_ = []  # x coordinates of points to plot
        lowerCAmelCase_ = []  # y coordinates of points to plot

        lowerCAmelCase_ = 0.0
        while t <= 1:
            lowerCAmelCase_ = self.bezier_curve_function(lowercase_ )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        lowerCAmelCase_ = [i[0] for i in self.list_of_points]
        lowerCAmelCase_ = [i[1] for i in self.list_of_points]

        plt.plot(
            lowercase_ , lowercase_ , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
        plt.scatter(lowercase_ , lowercase_ , color='red' , label='Control Points' )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
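
# A small hand check of the Bernstein basis the class builds: for degree 1 at
# t = 0.5 the basis is [0.5, 0.5], so the evaluated point is the midpoint of
# the two control points. A sketch using the same scipy comb the file imports.
from scipy.special import comb

degree, t = 1, 0.5
basis = [comb(degree, i) * (1 - t) ** (degree - i) * t**i for i in range(degree + 1)]
points = [(1, 2), (3, 5)]
x = sum(b * p[0] for b, p in zip(basis, points))
y = sum(b * p[1] for b, p in zip(basis, points))
print(basis, (x, y))  # [0.5, 0.5] (2.0, 3.5)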
14
lowerCamelCase_ = 6_5_5_2_1


def lowerCamelCase ( a_ ) -> int:
    lowerCAmelCase_ = 1
    lowerCAmelCase_ = 0
    for plain_chr in plain_text:
        lowerCAmelCase_ = (a + ord(a_ )) % MOD_ADLER
        lowerCAmelCase_ = (b + a) % MOD_ADLER
    return (b << 16) | a
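
# The standard library ships a C implementation of the same Adler-32 checksum,
# which is a convenient oracle for the routine above (the dataset's rewrite
# renamed its assignments and reads inconsistently, so the reference
# implementation is the safer cross-check). 300286872 is the well-known
# Adler-32 value for the ASCII string "Wikipedia".
import zlib

print(zlib.adler32(b"Wikipedia"))  # 300286872 == 0x11E60398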
14
1
from __future__ import annotations

import csv

import requests
from bsa import BeautifulSoup


def lowerCamelCase ( a_ = "" ) -> dict[str, float]:
    lowerCAmelCase_ = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    lowerCAmelCase_ = BeautifulSoup(requests.get(a_ ).text , 'html.parser' )
    lowerCAmelCase_ = soup.find_all('td' , attrs='titleColumn' )
    lowerCAmelCase_ = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(a_ , a_ )
    }


def lowerCamelCase ( a_ = "IMDb_Top_250_Movies.csv" ) -> None:
    lowerCAmelCase_ = get_imdb_top_aaa_movies()
    with open(a_ , 'w' , newline='' ) as out_file:
        lowerCAmelCase_ = csv.writer(a_ )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
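
# The scraper above keys off the old IMDb chart markup (td.titleColumn and
# td.ratingColumn.imdbRating); a toy document in that layout shows what the two
# find_all calls pick out. This is a sketch against the real bs4 package (the
# file's import spells the module with the dataset's rewritten name), and the
# live page layout may have changed since this script was written.
from bs4 import BeautifulSoup

toy_html = """
<table>
  <tr>
    <td class="titleColumn"><a>The Example</a></td>
    <td class="ratingColumn imdbRating"><strong>9.2</strong></td>
  </tr>
</table>
"""
toy_soup = BeautifulSoup(toy_html, "html.parser")
titles = toy_soup.find_all("td", attrs="titleColumn")
ratings = toy_soup.find_all("td", class_="ratingColumn imdbRating")
print({t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)})  # {'The Example': 9.2}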
14
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SegformerConfig,
    SegformerForImageClassification,
    SegformerForSemanticSegmentation,
    SegformerImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)


def lowerCamelCase ( a_ , a_=False ) -> Tuple:
    lowerCAmelCase_ = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            lowerCAmelCase_ = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )]
            lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' )
        if "norm" in key:
            lowerCAmelCase_ = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' )
        if "layer_norm1" in key:
            lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            lowerCAmelCase_ = key[key.find('block' ) + len('block' )]
            lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' )
        if "attn.q" in key:
            lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            lowerCAmelCase_ = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            lowerCAmelCase_ = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            lowerCAmelCase_ = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' )
            lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )]
            lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' )
        if key.startswith('head' ):
            lowerCAmelCase_ = key.replace('head' , 'classifier' )
        lowerCAmelCase_ = value
    return new_state_dict


def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            lowerCAmelCase_ = kv_weight[: config.hidden_sizes[i], :]
            lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
            lowerCAmelCase_ = kv_weight[config.hidden_sizes[i] :, :]
            lowerCAmelCase_ = kv_bias[config.hidden_sizes[i] :]


def lowerCamelCase ( ) -> Optional[int]:
    lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
    return image


@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
    lowerCAmelCase_ = SegformerConfig()
    lowerCAmelCase_ = False

    # set attributes based on model_name
    lowerCAmelCase_ = 'huggingface/label-files'
    if "segformer" in model_name:
        lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
        if "ade" in model_name:
            lowerCAmelCase_ = 150
            lowerCAmelCase_ = 'ade20k-id2label.json'
            lowerCAmelCase_ = (1, 150, 128, 128)
        elif "city" in model_name:
            lowerCAmelCase_ = 19
            lowerCAmelCase_ = 'cityscapes-id2label.json'
            lowerCAmelCase_ = (1, 19, 128, 128)
        else:
            raise ValueError(F'''Model {model_name} not supported''' )
    elif "mit" in model_name:
        lowerCAmelCase_ = True
        lowerCAmelCase_ = model_name[4:6]
        lowerCAmelCase_ = 1_000
        lowerCAmelCase_ = 'imagenet-1k-id2label.json'
        lowerCAmelCase_ = (1, 1_000)
    else:
        raise ValueError(F'''Model {model_name} not supported''' )

    # set config attributes
    lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
    lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
    if size == "b0":
        pass
    elif size == "b1":
        lowerCAmelCase_ = [64, 128, 320, 512]
        lowerCAmelCase_ = 256
    elif size == "b2":
        lowerCAmelCase_ = [64, 128, 320, 512]
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = [3, 4, 6, 3]
    elif size == "b3":
        lowerCAmelCase_ = [64, 128, 320, 512]
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = [3, 4, 18, 3]
    elif size == "b4":
        lowerCAmelCase_ = [64, 128, 320, 512]
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = [3, 8, 27, 3]
    elif size == "b5":
        lowerCAmelCase_ = [64, 128, 320, 512]
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = [3, 6, 40, 3]
    else:
        raise ValueError(F'''Size {size} not supported''' )

    # load image processor (only resize + normalize)
    lowerCAmelCase_ = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )

    # prepare image
    lowerCAmelCase_ = prepare_img()
    lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values

    logger.info(F'''Converting model {model_name}...''' )

    # load original state dict
    if encoder_only:
        lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
    else:
        lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']

    # rename keys
    lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(a_ , a_ )

    # create HuggingFace model and load state dict
    if encoder_only:
        lowerCAmelCase_ = False
        lowerCAmelCase_ = SegformerForImageClassification(a_ )
    else:
        lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
    model.load_state_dict(a_ )
    model.eval()

    # forward pass
    lowerCAmelCase_ = model(a_ )
    lowerCAmelCase_ = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
                [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
                [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
                [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
                [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
                [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
                [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
                [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
                [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
                [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
                [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
                [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
                [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
                [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
                [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
                [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
                [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
                [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
                [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
                [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
                [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
                [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
                [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
                [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
                [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        lowerCAmelCase_ = torch.tensor(
            [
                [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
                [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
                [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
            ] )
    else:
        lowerCAmelCase_ = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )

    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(a_ ).mkdir(exist_ok=a_ )
    model.save_pretrained(a_ )
    image_processor.save_pretrained(a_ )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()

    parser.add_argument(
        """--model_name""",
        default="""segformer.b0.512x512.ade.160k""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )

    lowerCamelCase_ = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
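
# The rename logic above is positional string surgery on checkpoint keys; one
# hypothetical key traced through the backbone and patch_embed branches shows
# the intended mapping (a sketch of two branches, not the full rename pass).
sample_key = "backbone.patch_embed1.proj.weight"
sample_key = sample_key.replace("backbone", "segformer.encoder")
stage = sample_key[sample_key.find("patch_embed") + len("patch_embed")]
sample_key = sample_key.replace(f"patch_embed{stage}", f"patch_embeddings.{int(stage) - 1}")
print(sample_key)  # segformer.encoder.patch_embeddings.0.proj.weight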
14
1
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


lowerCamelCase_ = trt.Logger(trt.Logger.WARNING)
lowerCamelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase_ = logging.getLogger(__name__)

lowerCamelCase_ = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    """--onnx_model_path""",
    default=None,
    type=str,
    required=True,
    help="""Path to ONNX model: """,
)
parser.add_argument(
    """--output_dir""",
    default=None,
    type=str,
    required=True,
    help="""The output directory where the model checkpoints and predictions will be written.""",
)

# Other parameters
parser.add_argument(
    """--tokenizer_name""",
    default="""""",
    type=str,
    required=True,
    help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
    """--version_2_with_negative""",
    action="""store_true""",
    help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
    """--null_score_diff_threshold""",
    type=float,
    default=0.0,
    help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
    """--max_seq_length""",
    default=3_8_4,
    type=int,
    help=(
        """The maximum total input sequence length after WordPiece tokenization. Sequences """
        """longer than this will be truncated, and sequences shorter than this will be padded."""
    ),
)
parser.add_argument(
    """--doc_stride""",
    default=1_2_8,
    type=int,
    help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
    """--n_best_size""",
    default=2_0,
    type=int,
    help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
    """--max_answer_length""",
    default=3_0,
    type=int,
    help=(
        """The maximum length of an answer that can be generated. This is needed because the start """
        """and end predictions are not conditioned on one another."""
    ),
)
parser.add_argument("""--seed""", type=int, default=4_2, help="""random seed for initialization""")
parser.add_argument(
    """--dataset_name""",
    type=str,
    default=None,
    required=True,
    help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--dataset_config_name""",
    type=str,
    default=None,
    help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
    """--fp16""",
    action="""store_true""",
    help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
    """--int8""",
    action="""store_true""",
    help="""Whether to use INT8""",
)
lowerCamelCase_ = parser.parse_args()

if args.tokenizer_name:
    lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
        """You can do it from another script, save it, and load it from here, using --tokenizer_name."""
    )

logger.info("""Training/evaluation parameters %s""", args)

lowerCamelCase_ = args.per_device_eval_batch_size
lowerCamelCase_ = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
lowerCamelCase_ = True
lowerCamelCase_ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
    lowerCamelCase_ = """temp_engine/bert-fp16.engine"""
if args.inta:
    lowerCamelCase_ = """temp_engine/bert-int8.engine"""

# import ONNX file
if not os.path.exists("""temp_engine"""):
    os.makedirs("""temp_engine""")

lowerCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, """rb""") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase_ = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        lowerCamelCase_ = 1 << 5_0
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase_ = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase_ = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, """wb""") as f:
            f.write(engine.serialize())


def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
    lowerCAmelCase_ = np.asarray(inputs['input_ids'] , dtype=np.intaa )
    lowerCAmelCase_ = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
    lowerCAmelCase_ = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , a_ )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , a_ )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , a_ )
    # start time
    lowerCAmelCase_ = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(a_ ) for d_inp in d_inputs] + [int(a_ ), int(a_ )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(a_ , a_ , a_ )
    cuda.memcpy_dtoh_async(a_ , a_ , a_ )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    lowerCAmelCase_ = time.time()
    lowerCAmelCase_ = end_time - start_time
    lowerCAmelCase_ = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
    datefmt="""%m/%d/%Y %H:%M:%S""",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

lowerCamelCase_ = raw_datasets["""validation"""].column_names

lowerCamelCase_ = """question""" if """question""" in column_names else column_names[0]
lowerCamelCase_ = """context""" if """context""" in column_names else column_names[1]
lowerCamelCase_ = """answers""" if """answers""" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase_ = tokenizer.padding_side == """right"""

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

lowerCamelCase_ = min(args.max_seq_length, tokenizer.model_max_length)


def lowerCamelCase ( a_ ) -> List[Any]:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    lowerCAmelCase_ = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    lowerCAmelCase_ = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,
        examples[context_column_name if pad_on_right else question_column_name] ,
        truncation='only_second' if pad_on_right else 'only_first' ,
        max_length=a_ ,
        stride=args.doc_stride ,
        return_overflowing_tokens=a_ ,
        return_offsets_mapping=a_ ,
        padding='max_length' ,
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    lowerCAmelCase_ = tokenized_examples.pop('overflow_to_sample_mapping' )

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    lowerCAmelCase_ = []

    for i in range(len(tokenized_examples['input_ids'] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        lowerCAmelCase_ = tokenized_examples.sequence_ids(a_ )
        lowerCAmelCase_ = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        lowerCAmelCase_ = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['id'][sample_index] )

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        lowerCAmelCase_ = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
        ]

    return tokenized_examples


lowerCamelCase_ = raw_datasets["""validation"""]

# Validation Feature Creation
lowerCamelCase_ = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)

lowerCamelCase_ = default_data_collator

lowerCamelCase_ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCamelCase_ = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def lowerCamelCase ( a_ , a_ , a_ , a_="eval" ) -> List[Any]:
    # Post-processing: we match the start logits and end logits to answers in the original context.
    lowerCAmelCase_ = postprocess_qa_predictions(
        examples=a_ ,
        features=a_ ,
        predictions=a_ ,
        version_2_with_negative=args.version_2_with_negative ,
        n_best_size=args.n_best_size ,
        max_answer_length=args.max_answer_length ,
        null_score_diff_threshold=args.null_score_diff_threshold ,
        output_dir=args.output_dir ,
        prefix=a_ ,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        lowerCAmelCase_ = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        lowerCAmelCase_ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    lowerCAmelCase_ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=a_ , label_ids=a_ )


lowerCamelCase_ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")

# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def lowerCamelCase ( a_ ) -> List[Any]:
        return trt.volume(engine.get_binding_shape(a_ ) ) * engine.get_binding_dtype(a_ ).itemsize

    # Allocate device memory for inputs and outputs.
    lowerCamelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    lowerCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase_ = cuda.Stream()

    # Evaluation
    logger.info("""***** Running Evaluation *****""")
    logger.info(f'''  Num examples = {len(eval_dataset)}''')
    logger.info(f'''  Batch size = {args.per_device_eval_batch_size}''')

    lowerCamelCase_ = 0.0
    lowerCamelCase_ = 0
    lowerCamelCase_ = timeit.default_timer()

    lowerCamelCase_ = None
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase_ , lowerCamelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1

        lowerCamelCase_ , lowerCamelCase_ = outputs
        lowerCamelCase_ = torch.tensor(start_logits)
        lowerCamelCase_ = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        lowerCamelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
        lowerCamelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)

        lowerCamelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)

    if all_preds is not None:
        lowerCamelCase_ = nested_truncate(all_preds, len(eval_dataset))

    lowerCamelCase_ = timeit.default_timer() - start_time
    logger.info("""  Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0 / niter))
    logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0))
    logger.info("""Total Number of Inference = %d""", niter)

lowerCamelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
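
# For orientation, the binding_nbytes helper inside the TRT context is just
# element count times element size. A numpy restatement of that arithmetic,
# assuming this script's defaults (batch size 8, max_seq_length 384) and a
# float32 logits binding:
import numpy as np

shape = (8, 384)
print(int(np.prod(shape)) * np.dtype(np.float32).itemsize)  # 12288 bytes per output buffer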
14
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class a_ ( a_ , a_ ):
    '''simple docstring'''

    __a: Optional[Any] = '''nat'''

    __a: int = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(**lowercase_ )
        lowerCAmelCase_ = patch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = embed_dim
        lowerCAmelCase_ = depths
        lowerCAmelCase_ = len(lowercase_ )
        lowerCAmelCase_ = num_heads
        lowerCAmelCase_ = kernel_size
        lowerCAmelCase_ = mlp_ratio
        lowerCAmelCase_ = qkv_bias
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = drop_path_rate
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = layer_norm_eps
        lowerCAmelCase_ = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
        lowerCAmelCase_ = layer_scale_init_value
        lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
        lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
            out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
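
# The hidden_size line in the config above doubles the embedding dimension
# once per stage after the first; with the defaults shown (embed_dim=64 and
# four stages) that yields 512. The arithmetic, restated:
embed_dim, depths = 64, [3, 4, 6, 5]
print(int(embed_dim * 2 ** (len(depths) - 1)))  # 512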
14
1
def lowerCamelCase ( a_ ) -> int:
    assert column_title.isupper()
    lowerCAmelCase_ = 0
    lowerCAmelCase_ = len(a_ ) - 1
    lowerCAmelCase_ = 0

    while index >= 0:
        lowerCAmelCase_ = (ord(column_title[index] ) - 64) * pow(26 , a_ )
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
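
# The loop above is base-26 positional arithmetic with 'A' mapped to 1; the
# same value can be folded left to right. Worked example: "AB" -> 1 * 26 + 2.
from functools import reduce

print(reduce(lambda acc, ch: acc * 26 + (ord(ch) - 64), "AB", 0))  # 28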
14
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    copy_func,
    replace_return_docstrings,
)
from .generic import (
    ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map,
    cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor,
    is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype,
    is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose,
    working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX,
    TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin,
    RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information,
    download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file,
    http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH,
    DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import,
    get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available,
    is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available,
    is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook,
    is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available,
    is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available,
    is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available,
    is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available,
    is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available,
    is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available,
    is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble,
    is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available,
    is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available,
    is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available,
    is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available,
    is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available,
    is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available,
    requires_backends, torch_only_method,
)


lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""

lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

lowerCamelCase_ = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.

lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def lowerCamelCase ( a_ ) -> Dict:
    if version.parse(a_ ) < version.parse(a_ ):
        if "dev" in min_version:
            lowerCAmelCase_ = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
            'versions of HuggingFace Transformers.'
        )
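
# The dev-version branch in the version check above exists because source
# installs report pre-release versions; packaging's parse already orders those
# correctly against plain releases, as a quick sketch shows:
from packaging import version

print(version.parse("4.20.1") < version.parse("4.21.0.dev0"))  # True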
14
1
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class a_ :
    '''simple docstring'''

    def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Tuple:
        '''simple docstring'''
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = seq_length
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_input_mask
        lowerCAmelCase_ = use_token_type_ids
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = type_vocab_size
        lowerCAmelCase_ = type_sequence_label_size
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = num_labels
        lowerCAmelCase_ = num_choices
        lowerCAmelCase_ = scope

    def _lowercase ( self ) -> str:
        '''simple docstring'''
        lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowerCAmelCase_ = None
        if self.use_input_mask:
            lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )

        lowerCAmelCase_ = None
        if self.use_token_type_ids:
            lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        lowerCAmelCase_ = None
        lowerCAmelCase_ = None
        lowerCAmelCase_ = None
        if self.use_labels:
            lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )

        lowerCAmelCase_ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )

    def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
        '''simple docstring'''
        lowerCAmelCase_ = LlamaModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
        lowerCAmelCase_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
        '''simple docstring'''
        lowerCAmelCase_ = True
        lowerCAmelCase_ = LlamaModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
        lowerCAmelCase_ = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
        '''simple docstring'''
        lowerCAmelCase_ = LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int:
        '''simple docstring'''
        lowerCAmelCase_ = True
        lowerCAmelCase_ = True
        lowerCAmelCase_ = LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()

        # first forward pass
        lowerCAmelCase_ = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
        lowerCAmelCase_ = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )

        lowerCAmelCase_ = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
        lowerCAmelCase_ = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]

        # select random slice
        lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )

    def _lowercase ( self ) -> str:
        '''simple docstring'''
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        (
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
        ) = config_and_inputs
        lowerCAmelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
    '''simple docstring'''

    __a: Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    __a: Union[str, Any] = (LlamaForCausalLM,) if is_torch_available() else ()
    __a: str = (
        {
            '''feature-extraction''': LlamaModel,
            '''text-classification''': LlamaForSequenceClassification,
            '''text-generation''': LlamaForCausalLM,
            '''zero-shot''': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __a: str = False
    __a: int = False

    def _lowercase ( self ) -> List[str]:
        '''simple docstring'''
        lowerCAmelCase_ = LlamaModelTester(self )
        lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7 )

    def _lowercase ( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _lowercase ( self ) -> int:
        '''simple docstring'''
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def _lowercase ( self ) -> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase_ = type
            self.model_tester.create_and_check_model(*lowercase_ )

    def _lowercase ( self ) -> List[Any]:
        '''simple docstring'''
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = input_dict['input_ids']
        lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
        lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = 'single_label_classification'
        lowerCAmelCase_ = input_dict['input_ids']
        lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
        lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def _lowercase ( self ) -> List[str]:
        '''simple docstring'''
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = 'multi_label_classification'
        lowerCAmelCase_ = input_dict['input_ids']
        lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
        lowerCAmelCase_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def _lowercase ( self , lowercase_ ) -> int:
        '''simple docstring'''
        lowerCAmelCase_ , lowerCAmelCase_ =
self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase_ = LlamaModel(lowercase_ ) original_model.to(lowercase_ ) original_model.eval() lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase_ = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase_ = LlamaModel(lowercase_ ) scaled_model.to(lowercase_ ) scaled_model.eval() lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) ) @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) lowerCAmelCase_ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!'
) @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) lowerCAmelCase_ = model(torch.tensor(lowercase_ ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) lowerCAmelCase_ = model(torch.tensor(lowercase_ ) ) # Expected mean on dim = -1 lowerCAmelCase_ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase_ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabilities somehow, will update!
Also it is gonna be a `too_slow` test' ) @slow def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) lowerCAmelCase_ = model(torch.tensor(lowercase_ ) ) lowerCAmelCase_ = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 ) # fmt: off lowerCAmelCase_ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is currently gated' ) @slow def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' lowerCAmelCase_ = 'Simply put, the theory of relativity states that ' lowerCAmelCase_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) lowerCAmelCase_ = tokenizer.encode(lowercase_ , return_tensors='pt' ) lowerCAmelCase_ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ ) # greedy generation outputs lowerCAmelCase_ = model.generate(lowercase_ , max_new_tokens=6_4 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ )
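# Editor's note: a minimal, self-contained sketch of the RoPE-scaling setup the
# parameterized test above exercises. It assumes a transformers version whose
# LlamaConfig accepts a rope_scaling dict with "type" in {"linear", "dynamic"}
# and a float "factor"; the sizes below are illustrative, not the test's own.
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, intermediate_size=37,
    num_hidden_layers=2, num_attention_heads=4, max_position_embeddings=512,
)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # mirrors the dict built in the test
scaled_model = LlamaModel(config).eval()
# "linear" rescales every position, so even short-input outputs change; "dynamic"
# leaves the embeddings untouched until an input exceeds max_position_embeddings,
# which is why the test expects matching short-input outputs only for "dynamic".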
14
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCamelCase_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCamelCase ( a_ ) -> List[str]: if isinstance(a_ , torch.Tensor ): return image elif isinstance(a_ , PIL.Image.Image ): lowerCAmelCase_ = [image] lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image] lowerCAmelCase_ = torch.stack(a_ ) return image class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_ , scheduler=lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ ) lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 ) lowerCAmelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple: '''simple docstring''' if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' ) lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase_ = init_latents.shape lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents print('add noise to latents at timestep' , lowercase_ ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) lowerCAmelCase_ = init_latents return latents @torch.no_grad() def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(lowercase_ ) # 2. Preprocess image lowerCAmelCase_ = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device ) lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ ) # 4. Prepare latent variables lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ ) lowerCAmelCase_ = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample # 2.
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
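# Editor's note: a worked example of the strength-to-timestep arithmetic in
# get_timesteps above (the variable names here are illustrative).
num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# The scheduler then iterates timesteps[t_start:], i.e. the last 40 of 50 steps:
# a higher strength adds more noise to the init image and denoises for longer.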
14
1
def lowerCamelCase ( a_ , a_ ) -> int: if len(a_ ) != len(a_ ): raise ValueError('String lengths must match!' ) lowerCAmelCase_ = 0 for chara, chara in zip(a_ , a_ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
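# Editor's note: a de-obfuscated sketch of the Hamming-distance routine above;
# the readable names (hamming_distance, string_a, count, ...) are editorial.
# The obfuscated copy reuses one name for both loop variables, so its comparison
# can never fire; this version restores the intended pairwise comparison.
def hamming_distance(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count

assert hamming_distance("karolin", "kathrin") == 3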
14
def lowerCamelCase ( a_ ) -> "list[int]": if upper_limit < 0: raise ValueError('Limit for the Catalan sequence must be ≥ 0' ) lowerCAmelCase_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowerCAmelCase_ = 1 if upper_limit > 0: lowerCAmelCase_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(a_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowerCamelCase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
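# Editor's note: a quick worked check of the recurrence above. C(0)..C(5) are
# the standard Catalan numbers:
#     expected = [1, 1, 2, 5, 14, 42]
# e.g. C(3) = C(0)*C(2) + C(1)*C(1) + C(2)*C(0) = 1*2 + 1*1 + 2*1 = 5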
14
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowerCamelCase_ = pytest.mark.integration lowerCamelCase_ = {"""comet"""} lowerCamelCase_ = importlib.util.find_spec("""fairseq""") is not None lowerCamelCase_ = {"""code_eval"""} lowerCamelCase_ = os.name == """nt""" lowerCamelCase_ = {"""bertscore""", """frugalscore""", """perplexity"""} lowerCamelCase_ = importlib.util.find_spec("""transformers""") is not None def lowerCamelCase ( a_ ) -> List[Any]: @wraps(a_ ) def wrapper(self , a_ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , a_ ) return wrapper def lowerCamelCase ( a_ ) -> str: @wraps(a_ ) def wrapper(self , a_ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , a_ ) return wrapper def lowerCamelCase ( a_ ) -> int: @wraps(a_ ) def wrapper(self , a_ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , a_ ) return wrapper def lowerCamelCase ( ) -> Any: lowerCAmelCase_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( a_ , a_ , a_ ) @local class a_ ( parameterized.TestCase ): '''simple docstring''' __a: Optional[int] = {} __a: Tuple = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' lowerCAmelCase_ = '[...]' lowerCAmelCase_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowercase_ ) ).module_path ) lowerCAmelCase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=lowercase_ ) # check parameters lowerCAmelCase_ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowercase_ , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCAmelCase_ = doctest.testmod(lowercase_ , verbose=lowercase_ , raise_on_error=lowercase_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' lowerCAmelCase_ = '[...]' lowerCAmelCase_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowercase_ ) ).module_path ) # run doctest with self.use_local_metrics(): lowerCAmelCase_ = doctest.testmod(lowercase_ , verbose=lowercase_ , raise_on_error=lowercase_ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def _lowercase ( self , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with 
self.INTENSIVE_CALLS_PATCHER[metric_name](lowercase_ ): yield else: yield @contextmanager def _lowercase ( self ) -> List[Any]: '''simple docstring''' def load_local_metric(lowercase_ , *lowercase_ , **lowercase_ ): return load_metric(os.path.join('metrics' , lowercase_ ) , *lowercase_ , **lowercase_ ) with patch('datasets.load_metric' ) as mock_load_metric: lowerCAmelCase_ = load_local_metric yield @classmethod def _lowercase ( cls , lowercase_ ) -> Any: '''simple docstring''' def wrapper(lowercase_ ): lowerCAmelCase_ = contextmanager(lowercase_ ) lowerCAmelCase_ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase ( a_ ) -> List[Any]: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class a_ ( a_ ): '''simple docstring''' def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: lowerCAmelCase_ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase ( a_ ) -> Dict: import torch def bert_cos_score_idf(a_ , a_ , *a_ , **a_ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(a_ ) ) # mock get_model which is supposed to download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: lowerCAmelCase_ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase ( a_ ) -> Union[str, Any]: def load_from_checkpoint(a_ ): class a_ : '''simple docstring''' def _lowercase ( self , lowercase_ , *lowercase_ , **lowercase_ ) -> Tuple: '''simple docstring''' assert len(lowercase_ ) == 2 lowerCAmelCase_ = [0.19, 0.92] return scores, sum(lowercase_ ) / len(lowercase_ ) return Model() # mock load_from_checkpoint which is supposed to download a bert model with patch('comet.download_model' ) as mock_download_model: lowerCAmelCase_ = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: lowerCAmelCase_ = load_from_checkpoint yield def lowerCamelCase ( ) -> str: lowerCAmelCase_ = load_metric(os.path.join('metrics' , 'seqeval' ) ) lowerCAmelCase_ = 'ERROR' lowerCAmelCase_ = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(a_ , match=re.escape(a_ ) ): metric.compute(predictions=[] , references=[] , scheme=a_ )
14
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(a_ ) class a_ ( a_ ): '''simple docstring''' def __init__( self , *lowercase_ , **lowercase_ ) -> Any: '''simple docstring''' super().__init__(*lowercase_ , **lowercase_ ) self.check_model_type(lowercase_ ) def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = {}, {} if padding is not None: lowerCAmelCase_ = padding if truncation is not None: lowerCAmelCase_ = truncation if top_k is not None: lowerCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int: '''simple docstring''' if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ): lowerCAmelCase_ = {'image': image, 'question': question} else: lowerCAmelCase_ = image lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ ) return results def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = load_image(inputs['image'] ) lowerCAmelCase_ = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ ) lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework ) model_inputs.update(lowercase_ ) return model_inputs def _lowercase ( self , lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.model(**lowercase_ ) return model_outputs def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any: '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.sigmoid()[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
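# Editor's note: a hedged usage sketch for the pipeline class above via the
# high-level transformers factory; the checkpoint name and image path are
# illustrative choices, not prescribed by this file.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
result = vqa(image="cats.png", question="How many cats are there?", top_k=2)
# -> a list like [{"score": 0.9, "answer": "2"}, {"score": 0.05, "answer": "1"}]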
14
1
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np lowerCamelCase_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 lowerCamelCase_ = typing.Union[np.floataa, int, float] # noqa: UP007 def lowerCamelCase ( a_ , a_ ) -> VectorOut: return np.sqrt(np.sum((np.asarray(a_ ) - np.asarray(a_ )) ** 2 ) ) def lowerCamelCase ( a_ , a_ ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(a_ , a_ ) ) ** (1 / 2) if __name__ == "__main__": def lowerCamelCase ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
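# Editor's note: a worked check for both distance implementations above:
# d([1, 2, 3], [4, 5, 6]) = sqrt(3**2 + 3**2 + 3**2) = sqrt(27) ~= 5.196
import numpy as np

assert np.isclose(
    np.sqrt(np.sum((np.asarray([1, 2, 3]) - np.asarray([4, 5, 6])) ** 2)), 27 ** 0.5
)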
14
def lowerCamelCase ( a_ ) -> bool: lowerCAmelCase_ = set() # To detect a back edge, keep track of vertices currently in the recursion stack lowerCAmelCase_ = set() return any( node not in visited and depth_first_search(a_ , a_ , a_ , a_ ) for node in graph ) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool: visited.add(a_ ) rec_stk.add(a_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(a_ , a_ , a_ , a_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(a_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
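# Editor's note: a de-obfuscated sketch of the cycle check above (names are
# editorial). A neighbour that is already on the current DFS recursion stack
# closes a back edge, i.e. a cycle.
def check_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )

def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    rec_stk.remove(vertex)  # leave the recursion stack before returning
    return False

assert check_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False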
14
1
def lowerCamelCase ( a_ ) -> list[list[float]]: lowerCAmelCase_ = [] for data in source_data: for i, el in enumerate(a_ ): if len(a_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(a_ ) ) return data_lists def lowerCamelCase ( a_ , a_ ) -> list[list[float]]: lowerCAmelCase_ = [] for dlist, weight in zip(a_ , a_ ): lowerCAmelCase_ = min(a_ ) lowerCAmelCase_ = max(a_ ) lowerCAmelCase_ = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowerCAmelCase_ = F'''Invalid weight of {weight:f} provided''' raise ValueError(a_ ) score_lists.append(a_ ) return score_lists def lowerCamelCase ( a_ ) -> list[float]: lowerCAmelCase_ = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(a_ ): lowerCAmelCase_ = final_scores[j] + ele return final_scores def lowerCamelCase ( a_ , a_ ) -> list[list[float]]: lowerCAmelCase_ = get_data(a_ ) lowerCAmelCase_ = calculate_each_score(a_ , a_ ) lowerCAmelCase_ = generate_final_scores(a_ ) # append scores to source data for i, ele in enumerate(a_ ): source_data[i].append(a_ ) return source_data
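# Editor's note: a hypothetical input for the scoring helpers above (the data
# is invented for illustration). Two criteria per alternative; weight 0 means
# "lower is better" and weight 1 means "higher is better".
source_data = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
weights = [0, 1]
# Each column is min-max scaled (and inverted for weight 0), the scaled values
# are summed per row, and that final score is appended to each source_data row.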
14
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: int = StableDiffusionInpaintPipeline __a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a: int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a: List[str] = frozenset([] ) def _lowercase ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) lowerCAmelCase_ = CLIPTextModel(lowercase_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int: '''simple docstring''' lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) ) lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) ) if str(lowercase_ ).startswith('mps' ): lowerCAmelCase_ = torch.manual_seed(lowercase_ ) else: lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowerCAmelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ ) lowerCAmelCase_ = 
sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ ) lowerCAmelCase_ = sd_pipe(**lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , ) lowerCAmelCase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) lowerCAmelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting' lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' ) lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench' lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , ) lowerCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
14
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=a_ ) class a_ ( a_ ): '''simple docstring''' __a: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) __a: ClassVar[Features] = Features({'''text''': Value('''string''' )} ) __a: ClassVar[Features] = Features({'''labels''': ClassLabel} ) __a: str = "text" __a: str = "labels" def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) lowerCAmelCase_ = copy.deepcopy(self ) lowerCAmelCase_ = self.label_schema.copy() lowerCAmelCase_ = features[self.label_column] lowerCAmelCase_ = label_schema return task_template @property def _lowercase ( self ) -> Dict[str, str]: '''simple docstring''' return { self.text_column: "text", self.label_column: "labels", }
14
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class a_ : '''simple docstring''' __a: int __a: int class a_ : '''simple docstring''' def __init__( self , lowercase_ ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = [[] for _ in range(lowercase_ )] lowerCAmelCase_ = size def __getitem__( self , lowercase_ ) -> Iterator[Edge]: '''simple docstring''' return iter(self._graph[vertex] ) @property def _lowercase ( self ) -> List[Any]: '''simple docstring''' return self._size def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) ) def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None: '''simple docstring''' lowerCAmelCase_ = deque([start_vertex] ) lowerCAmelCase_ = [None] * self.size lowerCAmelCase_ = 0 while queue: lowerCAmelCase_ = queue.popleft() lowerCAmelCase_ = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowerCAmelCase_ = current_distance + edge.weight lowerCAmelCase_ = distances[edge.destination_vertex] if ( isinstance(lowercase_ , lowercase_ ) and new_distance >= dest_vertex_distance ): continue lowerCAmelCase_ = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
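# Editor's note: a de-obfuscated, function-style sketch of the 0-1 BFS above
# (names editorial). Zero-weight edges go to the front of the deque and
# one-weight edges to the back, so vertices settle in nondecreasing distance
# order without a priority queue.
from collections import deque

def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int, finish: int) -> int:
    dist: list = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:  # w must be 0 or 1
            new_dist = dist[u] + w
            if dist[v] is None or new_dist < dist[v]:
                dist[v] = new_dist
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    if dist[finish] is None:
        raise ValueError("No path from start_vertex to finish_vertex.")
    return dist[finish]

# edges: 0 -> 1 (weight 0), 0 -> 2 (weight 1), 1 -> 2 (weight 1)
assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2) == 1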
14
1
def lowerCamelCase ( a_ ) -> Optional[int]: lowerCAmelCase_ = [0] * len(a_ ) lowerCAmelCase_ = [] lowerCAmelCase_ = [1] * len(a_ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(a_ ) ): if indegree[i] == 0: queue.append(a_ ) while queue: lowerCAmelCase_ = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: lowerCAmelCase_ = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(a_ ) print(max(a_ ) ) # Adjacency list of Graph lowerCamelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
14
from __future__ import annotations lowerCamelCase_ = 1_0 def lowerCamelCase ( a_ ) -> list[int]: lowerCAmelCase_ = 1 lowerCAmelCase_ = max(a_ ) while placement <= max_digit: # declare and initialize empty buckets lowerCAmelCase_ = [[] for _ in range(a_ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCAmelCase_ = int((i / placement) % RADIX ) buckets[tmp].append(a_ ) # put each buckets' contents into list_of_ints lowerCAmelCase_ = 0 for b in range(a_ ): for i in buckets[b]: lowerCAmelCase_ = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
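# Editor's note: a de-obfuscated sketch of the LSD radix sort above (names are
# editorial). Note the write-back step `list_of_ints[a] = i`, which the
# obfuscated copy above reduces to a bare assignment.
RADIX = 10

def radix_sort(list_of_ints: list) -> list:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets = [[] for _ in range(RADIX)]
        for value in list_of_ints:
            buckets[(value // placement) % RADIX].append(value)
        # put each bucket's contents back into list_of_ints, in order
        a = 0
        for bucket in buckets:
            for value in bucket:
                list_of_ints[a] = value
                a += 1
        placement *= RADIX  # move to the next digit
    return list_of_ints

assert radix_sort([170, 45, 75, 90, 2, 24, 802, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]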
14
1
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowerCamelCase ( a_ ) -> List[str]: lowerCAmelCase_ = args.pruning_method lowerCAmelCase_ = args.threshold lowerCAmelCase_ = args.model_name_or_path.rstrip('/' ) lowerCAmelCase_ = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) lowerCAmelCase_ = torch.load(os.path.join(a_ , 'pytorch_model.bin' ) ) lowerCAmelCase_ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCAmelCase_ = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: lowerCAmelCase_ = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: lowerCAmelCase_ = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": lowerCAmelCase_ = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ ) lowerCAmelCase_ = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCAmelCase_ = name[:-6] lowerCAmelCase_ = model[F'''{prefix_}mask_scores'''] lowerCAmelCase_ = TopKBinarizer.apply(a_ , a_ ) lowerCAmelCase_ = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCAmelCase_ = name[:-6] lowerCAmelCase_ = model[F'''{prefix_}mask_scores'''] lowerCAmelCase_ = ThresholdBinarizer.apply(a_ , a_ , a_ ) lowerCAmelCase_ = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCAmelCase_ = name[:-6] lowerCAmelCase_ = model[F'''{prefix_}mask_scores'''] lowerCAmelCase_ , lowerCAmelCase_ = -0.1, 1.1 lowerCAmelCase_ = torch.sigmoid(a_ ) lowerCAmelCase_ = s * (r - l) + l lowerCAmelCase_ = s_bar.clamp(min=0.0 , max=1.0 ) lowerCAmelCase_ = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: lowerCAmelCase_ = os.path.join( os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' ) if not os.path.isdir(a_ ): shutil.copytree(a_ , a_ ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(a_ , os.path.join(a_ , 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) lowerCamelCase_ = parser.parse_args() main(args)
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]: # load base model lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowerCAmelCase_ = load_file(a_ ) lowerCAmelCase_ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.text_encoder else: lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) lowerCAmelCase_ = pipeline.unet # find the target layer lowerCAmelCase_ = layer_infos.pop(0 ) while len(a_ ) > -1: try: lowerCAmelCase_ = curr_layer.__getattr__(a_ ) if len(a_ ) > 0: lowerCAmelCase_ = layer_infos.pop(0 ) elif len(a_ ) == 0: break except Exception: if len(a_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowerCAmelCase_ = layer_infos.pop(0 ) lowerCAmelCase_ = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(a_ ) else: pair_keys.append(a_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 ) else: lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa ) lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(a_ , a_ ) # update visited list for item in pair_keys: visited.append(a_ ) return pipeline if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = args.base_model_path lowerCamelCase_ = args.checkpoint_path lowerCamelCase_ = args.dump_path lowerCamelCase_ = args.lora_prefix_unet lowerCamelCase_ = args.lora_prefix_text_encoder lowerCamelCase_ = args.alpha lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCamelCase_ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
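# Editor's note: an illustrative invocation of the conversion script above.
# The script filename and all paths are hypothetical; only the flag names come
# from the argparse definition in this file.
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75 --device cpu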
14
1
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase ( a_ ) -> Optional[int]: lowerCAmelCase_ = FileLock(str(tmpdir / 'foo.lock' ) ) lowerCAmelCase_ = FileLock(str(tmpdir / 'foo.lock' ) ) lowerCAmelCase_ = 0.01 with locka.acquire(): with pytest.raises(a_ ): lowerCAmelCase_ = time.time() locka.acquire(a_ ) assert time.time() - _start > timeout def lowerCamelCase ( a_ ) -> Optional[int]: lowerCAmelCase_ = 'a' * 1_000 + '.lock' lowerCAmelCase_ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('.lock' ) assert not locka._lock_file.endswith(a_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 lowerCAmelCase_ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(a_ ): locka.acquire(0 )
14
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase ( a_ ) -> Any: lowerCAmelCase_ = tmp_path / 'file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = tmp_path / 'malformed_file.csv' lowerCAmelCase_ = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ , a_ ) -> List[str]: lowerCAmelCase_ = tmp_path / 'csv_with_image.csv' lowerCAmelCase_ = textwrap.dedent( F'''\ image {image_file} ''' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> int: lowerCAmelCase_ = tmp_path / 'csv_with_label.csv' lowerCAmelCase_ = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) @pytest.fixture def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv' lowerCAmelCase_ = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(a_ , 'w' ) as f: f.write(a_ ) return str(a_ ) def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]: lowerCAmelCase_ = Csv() lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(a_ , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(a_ ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase ( a_ ) -> Optional[Any]: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() lowerCAmelCase_ = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase ( a_ ) -> int: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = f.read().splitlines()[1:] lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() lowerCAmelCase_ = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(a_ ) for label in labels] def lowerCamelCase ( a_ ) -> Union[str, Any]: lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(a_ ) for i in x.split()]} ) lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] ) lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) lowerCAmelCase_ = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
14
1
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class a_ ( a_ ): '''simple docstring''' __a: torch.FloatTensor class a_ ( a_ , a_ ): '''simple docstring''' @register_to_config def __init__( self , lowercase_ = 3_2 , lowercase_ = 6_4 , lowercase_ = 2_0 , lowercase_ = 7_6_8 , lowercase_=7_7 , lowercase_=4 , lowercase_ = 0.0 , lowercase_ = "silu" , lowercase_ = None , lowercase_ = None , lowercase_ = "linear" , lowercase_ = "prd" , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Union[str, Any]: '''simple docstring''' super().__init__() lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = attention_head_dim lowerCAmelCase_ = num_attention_heads * attention_head_dim lowerCAmelCase_ = additional_embeddings lowerCAmelCase_ = time_embed_dim or inner_dim lowerCAmelCase_ = embedding_proj_dim or embedding_dim lowerCAmelCase_ = clip_embed_dim or embedding_dim lowerCAmelCase_ = Timesteps(lowercase_ , lowercase_ , 0 ) lowerCAmelCase_ = TimestepEmbedding(lowercase_ , lowercase_ , out_dim=lowercase_ , act_fn=lowercase_ ) lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ ) if embedding_proj_norm_type is None: lowerCAmelCase_ = None elif embedding_proj_norm_type == "layer": lowerCAmelCase_ = nn.LayerNorm(lowercase_ ) else: raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' ) lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ ) if encoder_hid_proj_type is None: lowerCAmelCase_ = None elif encoder_hid_proj_type == "linear": lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ ) else: raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' ) lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowercase_ ) ) if added_emb_type == "prd": lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowercase_ ) ) elif added_emb_type is None: lowerCAmelCase_ = None else: raise ValueError( f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' ) lowerCAmelCase_ = nn.ModuleList( [ BasicTransformerBlock( lowercase_ , lowercase_ , lowercase_ , dropout=lowercase_ , activation_fn='gelu' , attention_bias=lowercase_ , ) for d in range(lowercase_ ) ] ) if norm_in_type == "layer": lowerCAmelCase_ = nn.LayerNorm(lowercase_ ) elif norm_in_type is None: lowerCAmelCase_ = None else: raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' ) lowerCAmelCase_ = nn.LayerNorm(lowercase_ ) lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ ) lowerCAmelCase_ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) lowerCAmelCase_ = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , lowercase_ , persistent=lowercase_ ) lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , lowercase_ ) ) lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , lowercase_ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _lowercase ( self ) -> Dict[str, AttentionProcessor]: '''simple docstring''' lowerCAmelCase_ = {} def fn_recursive_add_processors(lowercase_ , lowercase_ , lowercase_ ): if hasattr(lowercase_ , 'set_processor' ): lowerCAmelCase_ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowercase_ , lowercase_ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowercase_ , lowercase_ , lowercase_ ) return processors def _lowercase ( self , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = len(self.attn_processors.keys() ) if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(lowercase_ )} does not match the''' f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(lowercase_ , lowercase_ , lowercase_ ): if hasattr(lowercase_ , 'set_processor' ): if not isinstance(lowercase_ , lowercase_ ): module.set_processor(lowercase_ ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowercase_ , lowercase_ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowercase_ , lowercase_ , lowercase_ ) def _lowercase ( self ) -> Dict: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = hidden_states.shape[0] lowerCAmelCase_ = timestep if not torch.is_tensor(lowercase_ ): lowerCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowerCAmelCase_ = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCAmelCase_ = timesteps * torch.ones(lowercase_ , dtype=timesteps.dtype , device=timesteps.device ) lowerCAmelCase_ = self.time_proj(lowercase_ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
lowerCAmelCase_ = timesteps_projected.to(dtype=self.dtype ) lowerCAmelCase_ = self.time_embedding(lowercase_ ) if self.embedding_proj_norm is not None: lowerCAmelCase_ = self.embedding_proj_norm(lowercase_ ) lowerCAmelCase_ = self.embedding_proj(lowercase_ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: lowerCAmelCase_ = self.encoder_hidden_states_proj(lowercase_ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) lowerCAmelCase_ = self.proj_in(lowercase_ ) lowerCAmelCase_ = self.positional_embedding.to(hidden_states.dtype ) lowerCAmelCase_ = [] lowerCAmelCase_ = 0 if encoder_hidden_states is not None: additional_embeds.append(lowercase_ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: lowerCAmelCase_ = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: lowerCAmelCase_ = hidden_states[:, None, :] lowerCAmelCase_ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: lowerCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowercase_ , -1 , -1 ) additional_embeds.append(lowercase_ ) lowerCAmelCase_ = torch.cat( lowercase_ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens lowerCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: lowerCAmelCase_ = F.pad( lowercase_ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) lowerCAmelCase_ = hidden_states + positional_embeddings if attention_mask is not None: lowerCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 lowerCAmelCase_ = F.pad(lowercase_ , (0, self.additional_embeddings) , value=0.0 ) lowerCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) lowerCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: lowerCAmelCase_ = self.norm_in(lowercase_ ) for block in self.transformer_blocks: lowerCAmelCase_ = block(lowercase_ , attention_mask=lowercase_ ) lowerCAmelCase_ = self.norm_out(lowercase_ ) if self.prd_embedding is not None: lowerCAmelCase_ = hidden_states[:, -1] else: lowerCAmelCase_ = hidden_states[:, additional_embeddings_len:] lowerCAmelCase_ = self.proj_to_clip_embeddings(lowercase_ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
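# Illustrative sketch (not part of the module above): the additive causal mask built in
# the constructor can be reproduced in isolation. A token may attend to itself and to
# earlier positions; strictly-future positions get a large negative bias so the softmax
# drives their weights to ~0. The sequence length of 4 is an arbitrary example value.
import torch

seq_len = 4
mask = torch.full([seq_len, seq_len], -10000.0)
mask.triu_(1)  # keep -10000.0 strictly above the diagonal, zero on and below it
mask = mask[None, ...]  # broadcastable batch dimension, as registered in the buffer
print(mask[0])
# tensor([[     0., -10000., -10000., -10000.],
#         [     0.,      0., -10000., -10000.],
#         [     0.,      0.,      0., -10000.],
#         [     0.,      0.,      0.,      0.]])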
14
from maths.prime_factors import prime_factors


def lowerCamelCase ( a_ ) -> int:
    """Return the Liouville lambda: -1 if a_ has an odd number of prime factors (with multiplicity), else 1."""
    if not isinstance(a_ , int ):
        lowerCAmelCase_ = F'''Input value of [number={a_}] must be an integer'''
        raise TypeError(lowerCAmelCase_ )
    if a_ < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(a_ ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
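# Hedged usage sketch for the Liouville lambda above. Expected values assume
# maths.prime_factors.prime_factors returns prime factors with multiplicity.
for n in (1, 2, 3, 4, 10):
    print(n, lowerCamelCase(n))
# 1 1    (zero prime factors -> even count)
# 2 -1
# 3 -1
# 4 1    (2 * 2)
# 10 1   (2 * 5)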
14
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_=0.01 , lowercase_=1_0_0_0 ) -> Any: '''simple docstring''' lowerCAmelCase_ = p_stop lowerCAmelCase_ = max_length def __iter__( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 0 lowerCAmelCase_ = False while not stop and count < self.max_length: yield count count += 1 lowerCAmelCase_ = random.random() < self.p_stop class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=False , lowercase_=True ) -> int: '''simple docstring''' lowerCAmelCase_ = [ BatchSamplerShard(lowercase_ , 2 , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) for i in range(2 ) ] lowerCAmelCase_ = [list(lowercase_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowercase_ ) for shard in batch_sampler_shards] , [len(lowercase_ ) for e in expected] ) self.assertListEqual(lowercase_ , lowercase_ ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
lowerCAmelCase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) # Check the shards when the dataset is very small. lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ ) def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. lowerCAmelCase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowercase_ ) lowerCAmelCase_ = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_ ) def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowercase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) lowerCAmelCase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_ ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]] lowerCAmelCase_ = [BatchSamplerShard(lowercase_ , 2 , lowercase_ , even_batches=lowercase_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False , lowercase_=2 , lowercase_=False ) -> List[Any]: '''simple docstring''' random.seed(lowercase_ ) lowerCAmelCase_ = list(lowercase_ ) lowerCAmelCase_ = [ IterableDatasetShard( lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ , num_processes=lowercase_ , process_index=lowercase_ , split_batches=lowercase_ , ) for i in range(lowercase_ ) ] lowerCAmelCase_ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowercase_ ) iterable_dataset_lists.append(list(lowercase_ ) ) lowerCAmelCase_ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size lowerCAmelCase_ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) self.assertTrue(len(lowercase_ ) % shard_batch_size == 0 ) lowerCAmelCase_ = [] for idx in range(0 , len(lowercase_ ) , lowercase_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowercase_ ) < len(lowercase_ ): reference += reference self.assertListEqual(lowercase_ , reference[: len(lowercase_ )] ) def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = 4_2 lowerCAmelCase_ = RandomIterableDataset() self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) # Edge case with a very small dataset lowerCAmelCase_ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ ) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_ 
) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=lowercase_ ) lowerCAmelCase_ = SkipBatchSampler(lowercase_ , 2 ) self.assertListEqual(list(lowercase_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = DataLoader(list(range(1_6 ) ) , batch_size=4 ) lowerCAmelCase_ = skip_first_batches(lowercase_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' Accelerator() lowerCAmelCase_ = DataLoaderDispatcher(range(1_6 ) , batch_size=4 ) for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
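# Illustrative sketch of the sharding behaviour these tests assert: two
# BatchSamplerShard views over the same BatchSampler interleave whole batches across
# 2 processes, and with the default even_batches=True the second shard wraps around
# to the start of the dataset so both shards stay the same length (range(21) here is
# one of the cases exercised above).
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]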
14
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): 
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' ) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' , a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
1
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Naive approach: try every ordered triple.
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Optimized approach: sort, then sweep with two pointers for each anchor element.
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f'''The time for naive implementation is {times[0]}.''')
    print(f'''The time for optimized implementation is {times[1]}.''')
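# Quick hedged check of the two-pointer variant on a fixed input (the module above
# otherwise only benchmarks the two implementations on random data).
print(triplet_sum2([2, 7, 4, 0, 9, 5, 1, 3], 18))  # (2, 7, 9)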
14
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b via repeated doubling (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a by b modulo c, reducing after each addition."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
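# Hedged sanity check: the doubling loop agrees with the built-in operators for
# arbitrary example values.
print(binary_multiply(13, 11))         # 143, same as 13 * 11
print(binary_mod_multiply(13, 11, 7))  # 3, same as (13 * 11) % 7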
14
1
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {"""vocab_file""": """vocab.txt"""} lowerCamelCase_ = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } lowerCamelCase_ = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def lowerCamelCase ( a_ ) -> List[Any]: lowerCAmelCase_ = collections.OrderedDict() with open(a_ , 'r' , encoding='utf-8' ) as reader: lowerCAmelCase_ = reader.readlines() for index, token in enumerate(a_ ): lowerCAmelCase_ = token.rstrip('\n' ) lowerCAmelCase_ = index return vocab class a_ ( a_ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_="<unk>" , lowercase_=2_0_0 ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = vocab lowerCAmelCase_ = unk_token lowerCAmelCase_ = max_input_chars_per_word def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = list(lowercase_ ) if len(lowercase_ ) > self.max_input_chars_per_word: return [self.unk_token] lowerCAmelCase_ = 0 lowerCAmelCase_ = [] while start < len(lowercase_ ): lowerCAmelCase_ = len(lowercase_ ) lowerCAmelCase_ = None while start < end: lowerCAmelCase_ = ''.join(chars[start:end] ) if substr in self.vocab: lowerCAmelCase_ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowercase_ ) lowerCAmelCase_ = end return sub_tokens class a_ ( a_ ): '''simple docstring''' __a: str = VOCAB_FILES_NAMES __a: int = PRETRAINED_VOCAB_FILES_MAP __a: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a: List[Any] = ['''input_ids''', '''attention_mask'''] __a: Union[str, Any] = False def __init__( self , lowercase_ , lowercase_="<d>" , lowercase_="</d>" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<pad>" , lowercase_="<unk>" , lowercase_="</n>" , lowercase_="</_>" , lowercase_="left" , **lowercase_ , ) -> str: '''simple docstring''' requires_backends(self , ['jieba'] ) super().__init__( bod_token=lowercase_ , eod_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , unk_token=lowercase_ , line_token=lowercase_ , space_token=lowercase_ , padding_side=lowercase_ , **lowercase_ , ) lowerCAmelCase_ = bod_token lowerCAmelCase_ = eod_token lowerCAmelCase_ = load_vocab(lowercase_ ) lowerCAmelCase_ = self.encoder[space_token] lowerCAmelCase_ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowerCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_ : x[1] ) ) lowerCAmelCase_ = {v: k for k, v in self.encoder.items()} lowerCAmelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return self.encoder[self.bod_token] @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return self.encoder[self.eod_token] @property def _lowercase ( self ) -> List[Any]: '''simple docstring''' return self.encoder["\n"] @property def _lowercase ( self ) -> int: '''simple docstring''' return len(self.encoder ) def _lowercase ( self ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self , lowercase_ ) -> Tuple: '''simple 
docstring''' lowerCAmelCase_ = [] for x in jieba.cut(lowercase_ , cut_all=lowercase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase_ ) ) return output_tokens def _lowercase ( self , lowercase_ , **lowercase_ ) -> Dict: '''simple docstring''' lowerCAmelCase_ = [i for i in token_ids if i >= 0] lowerCAmelCase_ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ ) -> List[str]: '''simple docstring''' return token in self.encoder def _lowercase ( self , lowercase_ ) -> str: '''simple docstring''' return "".join(lowercase_ ) def _lowercase ( self , lowercase_ ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) ) def _lowercase ( self , lowercase_ ) -> int: '''simple docstring''' return self.decoder.get(lowercase_ , self.unk_token ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]: '''simple docstring''' if os.path.isdir(lowercase_ ): lowerCAmelCase_ = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase_ = (filename_prefix + '-' if filename_prefix else '') + save_directory lowerCAmelCase_ = 0 if " " in self.encoder: lowerCAmelCase_ = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: lowerCAmelCase_ = self.encoder['\n'] del self.encoder["\n"] lowerCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_ : x[1] ) ) with open(lowercase_ , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase_ = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) return [1] + ([0] * len(lowercase_ ))
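# Self-contained sketch of the greedy longest-match loop used by the wordpiece
# tokenizer class above: repeatedly take the longest vocabulary entry starting at
# `start`, emitting the unk token when nothing matches. The toy vocab is an
# arbitrary example.
def greedy_wordpiece(token, vocab, unk_token="<unk>"):
    chars = list(token)
    start, sub_tokens = 0, []
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
            substr = "".join(chars[start:end])
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            sub_tokens.append(unk_token)  # no match: skip one character
            start += 1
        else:
            sub_tokens.append(cur_substr)
            start = end
    return sub_tokens


print(greedy_wordpiece("unhappily", {"un", "happy", "happ", "ily"}))
# ['un', 'happ', 'ily']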
14
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class a_ ( a_ ): '''simple docstring''' __a: str = ['''vqvae'''] def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' lowerCAmelCase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase_ , device=self.device , ) lowerCAmelCase_ = noise lowerCAmelCase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_ , lowercase_ ) lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ ) lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample( generator=lowercase_ )[0] lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ = int(mask_start_secs * pixels_per_second ) lowerCAmelCase_ = int(mask_end_secs * pixels_per_second ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase_ ): lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample'] else: lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] if isinstance(self.scheduler , lowercase_ ): lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample'] else: lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , 
)['prev_sample'] if mask is not None: if mask_start > 0: lowerCAmelCase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample'] lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' ) lowerCAmelCase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) ) lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ = 1 - alpha_prod_t lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor: '''simple docstring''' lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
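# Hedged sketch of the spherical interpolation (slerp) implemented by the static
# method at the end of the pipeline above, written with the two endpoints kept
# distinct: alpha = 0 returns x0 and alpha = 1 returns x1, with intermediate values
# moving along the great circle between them.
from math import acos, sin

import torch


def slerp(x0, x1, alpha):
    theta = acos(
        torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)
    )
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


x0, x1 = torch.randn(4), torch.randn(4)
print(torch.allclose(slerp(x0, x1, 0.0), x0, atol=1e-5))  # True
print(torch.allclose(slerp(x0, x1, 1.0), x1, atol=1e-5))  # True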
14
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class a_ : '''simple docstring''' __a: List[Any] = MBartConfig __a: int = {} __a: Dict = '''gelu''' def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=3_2 , lowercase_=2 , lowercase_=4 , lowercase_=3_7 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , ) -> Dict: '''simple docstring''' lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = bos_token_id def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCAmelCase_ = prepare_mbart_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) return config, inputs_dict def _lowercase ( self , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFMBartModel(config=lowercase_ ).get_decoder() lowerCAmelCase_ = inputs_dict['input_ids'] lowerCAmelCase_ = input_ids[:1, :] lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :] lowerCAmelCase_ = inputs_dict['head_mask'] lowerCAmelCase_ = 1 # first forward pass lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple() lowerCAmelCase_ = past_key_values[1] def lowerCamelCase ( a_ , a_ , a_ , a_=None , a_=None , a_=None , a_=None , a_=None , ) -> Any: if attention_mask is None: lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase_ = tf.concat( [ 
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a_ ( a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __a: List[str] = (TFMBartForConditionalGeneration,) if is_tf_available() else () __a: Optional[int] = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __a: Optional[Any] = True __a: List[Any] = False __a: Optional[int] = False def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def _lowercase ( self ) -> List[str]: '''simple docstring''' lowerCAmelCase_ = TFMBartModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) @require_sentencepiece @require_tokenizers @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' __a: Dict = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] __a: Tuple = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] __a: Optional[int] = '''facebook/mbart-large-en-ro''' @cached_property def _lowercase ( self ) -> List[Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self , **lowercase_ ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = self.translate_src_text(**lowercase_ ) self.assertListEqual(self.expected_text , lowercase_ ) def _lowercase ( self , **lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = self.tokenizer(self.src_text , **lowercase_ , return_tensors='tf' ) lowerCAmelCase_ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) lowerCAmelCase_ = self.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) return generated_words @slow def _lowercase ( self ) -> Dict: '''simple docstring''' self._assert_generated_batch_equal_expected()
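# Hedged sketch of the integration test's translation path run outside unittest; the
# checkpoint name, source sentence, and beam setting all come from the test class
# above.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# ['Şeful ONU declară că nu există o soluţie militară în Siria']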
14
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]: def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ): lowerCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase_ = math.ceil(val / multiple ) * multiple return x lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = output_size # determine new height and width lowerCAmelCase_ = output_height / input_height lowerCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase_ = scale_width else: # fit height lowerCAmelCase_ = scale_height lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ ) lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ ) return (new_height, new_width) class a_ ( a_ ): '''simple docstring''' __a: Union[str, Any] = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: '''simple docstring''' super().__init__(**lowercase_ ) lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4} lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' lowerCAmelCase_ = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowerCAmelCase_ = get_resize_output_image_size( lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict: '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: '''simple docstring''' lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(lowercase_ ) lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowerCAmelCase_ = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowercase_ ): lowerCAmelCase_ = target_sizes.numpy() lowerCAmelCase_ = [] for idx in range(len(lowercase_ ) ): lowerCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ ) lowerCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowerCAmelCase_ = logits.argmax(dim=1 ) lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
14
1
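The DPT-style resize logic in the row above snaps each scaled side to a multiple of `ensure_multiple_of`, scaling as little as possible when the aspect ratio is kept. A minimal standalone sketch of that computation; `dpt_output_size` and its argument names are illustrative, not the processor's API:

import math


def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    """Round val to the nearest multiple of `multiple`, staying within [min_val, max_val]."""
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


def dpt_output_size(input_size, target_size, keep_aspect_ratio=True, multiple=32):
    (in_h, in_w), (out_h, out_w) = input_size, target_size
    scale_h, scale_w = out_h / in_h, out_w / in_w
    if keep_aspect_ratio:
        # "scale as little as possible": use the factor closer to 1 for both sides
        scale_h = scale_w = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return (
        constraint_to_multiple_of(scale_h * in_h, multiple),
        constraint_to_multiple_of(scale_w * in_w, multiple),
    )


print(dpt_output_size((480, 640), (384, 384)))  # (384, 512): height's factor 0.8 wins, both sides snap to /32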
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
14
1
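A quick usage sketch for the conductivity solver above; the numbers are made-up illustrative values, not measurements:

# Pass 0 in the slot you want solved for: sigma = n * e * mu.
name, value = electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01)
print(name, value)  # -> conductivity 0.16021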
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class a_ ( a_ ): '''simple docstring''' __a: str = ['''vqvae'''] def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ ) def _lowercase ( self ) -> int: '''simple docstring''' return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: '''simple docstring''' lowerCAmelCase_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase_ , device=self.device , ) lowerCAmelCase_ = noise lowerCAmelCase_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase_ , lowercase_ ) lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ ) lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample( generator=lowercase_ )[0] lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ = int(mask_start_secs * pixels_per_second ) lowerCAmelCase_ = int(mask_end_secs * pixels_per_second ) lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase_ ): lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample'] else: lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] if isinstance(self.scheduler , lowercase_ ): lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample'] else: lowerCAmelCase_ = self.scheduler.step( model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , 
)['prev_sample'] if mask is not None: if mask_start > 0: lowerCAmelCase_ = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample'] lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' ) lowerCAmelCase_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) ) lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) ) @torch.no_grad() def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray: '''simple docstring''' assert isinstance(self.scheduler , lowercase_ ) self.scheduler.set_timesteps(lowercase_ ) lowerCAmelCase_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ = 1 - alpha_prod_t lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample'] lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor: '''simple docstring''' lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
14
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("Something went wrong: the queue emptied before input ended")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
14
1
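The audio-diffusion pipeline's last static method above is spherical linear interpolation (slerp) between two tensors. A self-contained sketch of the same formula, assuming inputs of equal shape and nonzero norm:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened tensors
    theta = acos(
        float(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    )
    # interpolate along the great-circle arc joining x0 and x1
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


x0, x1 = torch.randn(4), torch.randn(4)
halfway = slerp(x0, x1, 0.5)  # midpoint of the arc between x0 and x1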
import argparse from collections import defaultdict import yaml lowerCamelCase_ = """docs/source/en/_toctree.yml""" def lowerCamelCase ( a_ ) -> Optional[int]: lowerCAmelCase_ = defaultdict(a_ ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(a_ ) lowerCAmelCase_ = new_doc_list lowerCAmelCase_ = [key for key, value in counts.items() if value > 1] lowerCAmelCase_ = [] for duplicate_key in duplicates: lowerCAmelCase_ = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(a_ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) lowerCAmelCase_ = sorted(a_ , key=lambda a_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(a_ ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(a_ ) # Sort return overview_doc def lowerCamelCase ( a_=False ) -> str: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCAmelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCAmelCase_ = content[api_idx]['sections'] # Then to the model doc lowerCAmelCase_ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 lowerCAmelCase_ = api_doc[scheduler_idx]['sections'] lowerCAmelCase_ = clean_doc_toc(a_ ) lowerCAmelCase_ = False if new_scheduler_doc != scheduler_doc: lowerCAmelCase_ = True if overwrite: lowerCAmelCase_ = new_scheduler_doc if diff: if overwrite: lowerCAmelCase_ = api_doc with open(a_ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(a_ , allow_unicode=a_ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) def lowerCamelCase ( a_=False ) -> Dict: with open(a_ , encoding='utf-8' ) as f: lowerCAmelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCAmelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCAmelCase_ = content[api_idx]['sections'] # Then to the model doc lowerCAmelCase_ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 lowerCAmelCase_ = False lowerCAmelCase_ = api_doc[pipeline_idx]['sections'] lowerCAmelCase_ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: lowerCAmelCase_ = pipeline_doc['section'] lowerCAmelCase_ = clean_doc_toc(a_ ) if overwrite: lowerCAmelCase_ = new_sub_pipeline_doc new_pipeline_docs.append(a_ ) # sort overall pipeline doc lowerCAmelCase_ = clean_doc_toc(a_ ) if new_pipeline_docs != pipeline_docs: lowerCAmelCase_ = True if overwrite: lowerCAmelCase_ = new_pipeline_docs if diff: if overwrite: lowerCAmelCase_ = api_doc with open(a_ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(a_ , allow_unicode=a_ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' 
) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowerCamelCase_ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
14
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
14
1
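A round-trip sketch for the Base85 helpers above (names follow the cleaned-up snippet; the standard-library `base64.b85*` pair does the work):

payload = "Hello Base85 — naïve ✓"  # non-ASCII survives the UTF-8 round trip
blob = base85_encode(payload)
assert base85_decode(blob) == payload
print(blob)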
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
14
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int: if attention_mask is None: lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class a_ : '''simple docstring''' __a: Tuple = OPTConfig __a: Optional[Any] = {} __a: Tuple = '''gelu''' def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any: '''simple docstring''' lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = embed_dim lowerCAmelCase_ = word_embed_proj_dim lowerCAmelCase_ = False def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , ) lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ ) return config, inputs_dict def _lowercase ( self , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowerCAmelCase_ = TFOPTModel(config=lowercase_ ) lowerCAmelCase_ = inputs_dict['input_ids'] lowerCAmelCase_ = input_ids[:1, :] lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :] lowerCAmelCase_ = 1 # first forward pass lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase_ = 
tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0] lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 ) @require_tf class a_ ( a_ , a_ , unittest.TestCase ): '''simple docstring''' __a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else () __a: Union[str, Any] = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) __a: int = False __a: List[Any] = False __a: Dict = False __a: List[Any] = 1_0 def _lowercase ( self ) -> Tuple: '''simple docstring''' lowerCAmelCase_ = TFOPTModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase_ , lowercase_ ): if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase_ , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings lowerCAmelCase_ = model_class(config=lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase_ ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
lowerCAmelCase_ = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase_ ) # check that weights remain the same after resizing lowerCAmelCase_ = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase_ ) lowerCAmelCase_ = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCAmelCase_ = False self.assertTrue(lowercase_ ) def lowerCamelCase ( a_ ) -> Any: return tf.constant(a_ , dtype=tf.intaa ) @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' __a: Optional[int] = 9_9 def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) lowerCAmelCase_ = input_ids.shape[0] lowerCAmelCase_ = OPTConfig( vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class a_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' ) lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id ) with tf.GradientTape(): lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state lowerCAmelCase_ = (1, 1_1, 5_1_2) self.assertEqual(output.shape , lowercase_ ) lowerCAmelCase_ = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowerCAmelCase_ = 'facebook/opt-350m' def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model ) lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model ) lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowerCAmelCase_ = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], 
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ ) lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) ) @require_tf @slow class a_ ( unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-125m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ ) lowerCAmelCase_ = 'left' # use different length sentences to test batching lowerCAmelCase_ = [ 'Hello, my dog is a little', 'Today, I', ] lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ ) lowerCAmelCase_ = inputs['input_ids'] lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] ) lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ ) lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ ) lowerCAmelCase_ = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = 'facebook/opt-350m' lowerCAmelCase_ = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] lowerCAmelCase_ = [] lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ ) lowerCAmelCase_ = 
TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 ) lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ )
14
1
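A quick usage sketch of the k-th smallest helper from the `BinarySearchTree` above (method name follows the cleaned-up snippet):

t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
# in-order traversal yields 1, 3, 4, 6, 7, 8, 10, 13, 14
assert t.find_kth_smallest(3, t.root) == 4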
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
14
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string (RFC 1950)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
14
1
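The checksum above is plain Adler-32, so it can be sanity-checked against the standard library; note `zlib.adler32` takes bytes, hence the encode:

import zlib

text = "Wikipedia"
assert adler32(text) == zlib.adler32(text.encode("utf-8"))  # both 0x11E60398
print(hex(adler32(text)))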
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
14
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_=False ) -> Tuple: lowerCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): lowerCAmelCase_ = 'segformer.encoder.' + key if key.startswith('backbone' ): lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' ) if "norm" in key: lowerCAmelCase_ = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' ) if "layer_norm1" in key: lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ = key[key.find('block' ) + len('block' )] lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' ) if "attn.q" in key: lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase_ = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase_ = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase_ = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' ) if key.startswith('head' ): lowerCAmelCase_ = key.replace('head' , 'classifier' ) lowerCAmelCase_ = value return new_state_dict def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = 
'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ) return image @torch.no_grad() def lowerCamelCase ( a_ , a_ , a_ ) -> int: lowerCAmelCase_ = SegformerConfig() lowerCAmelCase_ = False # set attributes based on model_name lowerCAmelCase_ = 'huggingface/label-files' if "segformer" in model_name: lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: lowerCAmelCase_ = 150 lowerCAmelCase_ = 'ade20k-id2label.json' lowerCAmelCase_ = (1, 150, 128, 128) elif "city" in model_name: lowerCAmelCase_ = 19 lowerCAmelCase_ = 'cityscapes-id2label.json' lowerCAmelCase_ = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: lowerCAmelCase_ = True lowerCAmelCase_ = model_name[4:6] lowerCAmelCase_ = 1_000 lowerCAmelCase_ = 'imagenet-1k-id2label.json' lowerCAmelCase_ = (1, 1_000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 256 elif size == "b2": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 6, 3] elif size == "b3": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 18, 3] elif size == "b4": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 8, 27, 3] elif size == "b5": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) lowerCAmelCase_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ ) # prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) ) else: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(a_ , a_ ) # create HuggingFace model and load state dict if encoder_only: lowerCAmelCase_ = False lowerCAmelCase_ = SegformerForImageClassification(a_ ) else: lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass lowerCAmelCase_ = model(a_ ) lowerCAmelCase_ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.5_820, 
-8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCAmelCase_ = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], 
[-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: lowerCAmelCase_ = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
14
1
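Most of the SegFormer conversion above is systematic key renaming; the `patch_embed{i} -> patch_embeddings.{i-1}` step, for example, reduces to a small regex. A toy sketch with dummy weights (`rename_patch_embed` is illustrative, not the script's actual helper):

import re


def rename_patch_embed(key: str) -> str:
    key = key.replace("backbone", "segformer.encoder")
    # patch_embed1 -> patch_embeddings.0, patch_embed2 -> patch_embeddings.1, ...
    return re.sub(r"patch_embed(\d)", lambda m: f"patch_embeddings.{int(m.group(1)) - 1}", key)


toy = {"backbone.patch_embed1.proj.weight": 0, "backbone.patch_embed2.proj.weight": 1}
print({rename_patch_embed(k): v for k, v in toy.items()})
# {'segformer.encoder.patch_embeddings.0.proj.weight': 0,
#  'segformer.encoder.patch_embeddings.1.proj.weight': 1}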