code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model'} __A = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model' ), } } __A = { 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off __A = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 
'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = ["input_ids", "attention_mask"] A_ = [] A_ = [] def __init__( self: Dict , __A: List[Any] , __A: Optional[Any]="<s>" , __A: Any="</s>" , __A: List[Any]="</s>" , __A: Optional[Any]="<s>" , __A: Optional[int]="<unk>" , __A: Any="<pad>" , __A: List[str]="<mask>" , __A: Union[str, Any]=None , __A: Tuple=None , __A: Tuple=None , __A: Optional[Dict[str, Any]] = None , __A: Optional[Any]=None , __A: Optional[int]=False , **__A: Optional[Any] , ) -> str: # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs _A = legacy_behaviour super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , tokenizer_file=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__A , **__A , ) _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) _A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token _A = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _A = 1 _A = len(self.sp_model ) _A = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A ) } _A = {v: k for k, v in self.lang_code_to_id.items()} _A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _A = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _A = src_lang if src_lang is not None else '''eng_Latn''' _A = self.lang_code_to_id[self._src_lang] _A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: Any ) -> Dict: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self: Any , __A: List[str] ) -> Any: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __A ( self: Tuple ) -> Union[str, Any]: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __A ( self: Tuple ) -> str: return self._src_lang @src_lang.setter def __A ( self: Any , __A: str ) -> None: _A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __A ( self: List[str] , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) _A = [1] * len(self.prefix_tokens ) _A = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def __A ( self: int , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self: Optional[int] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if 
token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self: List[str] , __A: Any , __A: str , __A: Optional[str] , __A: Optional[str] , **__A: Dict ) -> int: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _A = src_lang _A = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) _A = self.convert_tokens_to_ids(__A ) _A = tgt_lang_id return inputs def __A ( self: str ) -> str: _A = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self: Optional[Any] , __A: str ) -> List[str]: return self.sp_model.encode(__A , out_type=__A ) def __A ( self: Any , __A: Optional[Any] ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _A = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __A ( self: List[str] , __A: int ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __A ( self: int , __A: Tuple ) -> str: _A = ''''''.join(__A ).replace(__A , ''' ''' ).strip() return out_string def __A ( self: Any , __A: str , __A: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__A ) return 
(out_vocab_file,) def __A ( self: List[Any] , __A: List[str] , __A: str = "eng_Latn" , __A: Optional[List[str]] = None , __A: str = "fra_Latn" , **__A: str , ) -> BatchEncoding: _A = src_lang _A = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def __A ( self: Optional[int] ) -> Union[str, Any]: return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self: str ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self: Any , __A: Any ) -> None: _A = self.lang_code_to_id[src_lang] if self.legacy_behaviour: _A = [] _A = [self.eos_token_id, self.cur_lang_code] else: _A = [self.cur_lang_code] _A = [self.eos_token_id] def __A ( self: Union[str, Any] , __A: str ) -> None: _A = self.lang_code_to_id[lang] if self.legacy_behaviour: _A = [] _A = [self.eos_token_id, self.cur_lang_code] else: _A = [self.cur_lang_code] _A = [self.eos_token_id]
62
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , 
decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there 
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) 
def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( 
self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, 
-1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
62
1
from typing import Dict, Optional import numpy as np import datasets __A = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' __A = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). 
The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' __A = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = False , ): '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): _A = new_id # turn into Numpy arrays _A = np.array(_lowercase ) _A = np.array(_lowercase ) if reduce_labels: _A = 2_55 _A = label - 1 _A = 2_55 _A = label != ignore_index _A = np.not_equal(_lowercase , _lowercase ) _A = pred_label[mask] _A = np.array(_lowercase )[mask] _A = pred_label[pred_label == label] _A = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0] _A = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0] _A = np.histogram(_lowercase , bins=_lowercase , range=(0, num_labels - 1) )[0] _A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = False , ): '''simple docstring''' _A = np.zeros((num_labels,) , dtype=np.floataa ) _A = np.zeros((num_labels,) , dtype=np.floataa ) _A = np.zeros((num_labels,) , dtype=np.floataa ) _A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(_lowercase , _lowercase ): _A ,_A ,_A ,_A = intersect_and_union( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = 
None , _lowercase = False , ): '''simple docstring''' _A ,_A ,_A ,_A = total_intersect_and_union( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # compute metrics _A = {} _A = total_area_intersect.sum() / total_area_label.sum() _A = total_area_intersect / total_area_union _A = total_area_intersect / total_area_label _A = np.nanmean(_lowercase ) _A = np.nanmean(_lowercase ) _A = all_acc _A = iou _A = acc if nan_to_num is not None: _A = {metric: np.nan_to_num(_lowercase , nan=_lowercase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def __A ( self: int ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def __A ( self: Optional[int] , __A: List[Any] , __A: Optional[Any] , __A: int , __A: bool , __A: Optional[int] = None , __A: Optional[Dict[int, int]] = None , __A: bool = False , ) -> Dict: _A = mean_iou( results=__A , gt_seg_maps=__A , num_labels=__A , ignore_index=__A , nan_to_num=__A , label_map=__A , reduce_labels=__A , ) return iou_result
62
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __A = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str: _A = question_encoder _A = generator _A = self.question_encoder def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict: if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) _A = os.path.join(__A , '''question_encoder_tokenizer''' ) _A = os.path.join(__A , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any: # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop('''config''' , __A ) if config is None: _A = RagConfig.from_pretrained(__A ) _A = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) _A = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=__A , generator=__A ) def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int: return self.current_tokenizer(*__A , **__A ) def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict: return self.generator.batch_decode(*__A , **__A ) def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple: return self.generator.decode(*__A , **__A ) def __A ( self: Dict ) -> List[str]: _A = self.question_encoder def __A ( self: Union[str, Any] ) -> int: _A = self.generator def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: 
str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding: warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , __A , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) _A = labels['''input_ids'''] return model_inputs
62
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} 
return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
from __future__ import annotations

# Restrict star-imports to the public entry point (helper is private).
__all__ = ["__A"]


def _ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Return the smallest index i in (left, right] with v[i] >= key.

    Plain binary search over the sorted ``tail`` array; on entry the caller
    guarantees v[left] < key <= v[right] (left may be -1 as a sentinel).
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def __A(_lowercase: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence.

    O(n log n) "patience" method: ``tail[k]`` holds the smallest possible
    tail value of an increasing subsequence of length k + 1.

    >>> __A([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> __A([])
    0
    """
    if len(_lowercase) == 0:
        return 0
    v = _lowercase
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: best candidate tail for length-1 runs.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest run found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail value >= v[i], keeping tails minimal.
            tail[_ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
import sys


def get_mid(pa, pb):
    """Return the midpoint of the segment joining points ``pa`` and ``pb``."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertexa, vertexb, vertexc, depth):
    """Draw a Sierpinski triangle by recursive subdivision.

    Outlines the triangle (vertexa, vertexb, vertexc) with the global
    ``my_pen`` turtle, then recurses on the three corner sub-triangles
    formed by the edge midpoints until ``depth`` reaches 0.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # One sub-triangle per original corner, each at half the edge length.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            'Correct format for using this script: '
            'python fractals.py <int:depth_for_fractal>'
        )
    # Imported lazily so importing this module never needs a Tk/display stack.
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
62
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else 
processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in 
outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
import math import flax.linen as nn import jax.numpy as jnp def __A ( _lowercase , _lowercase , _lowercase = 1 , _lowercase = 1 , _lowercase = 1.0e4 , _lowercase = False , _lowercase = 1.0 , ): '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" _A = float(embedding_dim // 2 ) _A = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) _A = min_timescale * jnp.exp(jnp.arange(_lowercase , dtype=jnp.floataa ) * -log_timescale_increment ) _A = jnp.expand_dims(_lowercase , 1 ) * jnp.expand_dims(_lowercase , 0 ) # scale embeddings _A = scale * emb if flip_sin_to_cos: _A = jnp.concatenate([jnp.cos(_lowercase ), jnp.sin(_lowercase )] , axis=1 ) else: _A = jnp.concatenate([jnp.sin(_lowercase ), jnp.cos(_lowercase )] , axis=1 ) _A = jnp.reshape(_lowercase , [jnp.shape(_lowercase )[0], embedding_dim] ) return signal class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 32 A_ = jnp.floataa @nn.compact def __call__( self: List[Any] , __A: Tuple ) -> Optional[Any]: _A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(__A ) _A = nn.silu(__A ) _A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(__A ) return temb class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 32 A_ = False A_ = 1 @nn.compact def __call__( self: Union[str, Any] , __A: List[str] ) -> Any: return get_sinusoidal_embeddings( __A , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
62
from __future__ import annotations

import csv

import requests
from bsa import BeautifulSoup  # NOTE(review): "bsa" is presumably bs4 (BeautifulSoup) — confirm package name


def __A ( _lowercase = "" ):
    """Scrape the IMDb Top 250 chart and return a ``{title: rating}`` dict.

    NOTE(review): identifiers here look machine-mangled — the body reads
    ``url`` and ``soup`` but only ever assigns to ``_A``, so as written this
    raises NameError at call time.  Flagged for reconstruction; do not treat
    the comments below as working behaviour.
    """
    # Presumably: url = _lowercase or <default Top-250 chart URL> — TODO confirm.
    _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    # Fetch and parse the chart page HTML.
    _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' )
    # Title cells and rating cells are paired positionally via zip() below.
    _A = soup.find_all('''td''' , attrs='''titleColumn''' )
    _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase )
    }


def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ):
    """Write the scraped Top 250 movies to the CSV file named by *_lowercase*.

    NOTE(review): calls ``get_imdb_top_aaa_movies``/``write_movies`` and reads
    ``writer``/``movies``, none of which are bound under those names in this
    file — further evidence the identifiers were mangled.
    """
    _A = get_imdb_top_aaa_movies()
    # newline='' per the csv module docs, so the writer controls line endings.
    with open(_lowercase , '''w''' , newline='''''' ) as out_file:
        _A = csv.writer(_lowercase )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
62
1
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) __A = logging.getLogger(__name__) @dataclass(frozen=snake_case ) class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = 42 A_ = 42 A_ = None A_ = None A_ = None @dataclass(frozen=snake_case ) class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = 42 A_ = None A_ = None A_ = None A_ = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[int] , __A: str , __A: PreTrainedTokenizer , __A: str , __A: Optional[int] = None , __A: str=False , __A: bool = False , ) -> Dict: _A = hans_processors[task]() _A = os.path.join( __A , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__A ) , __A , ) , ) _A = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _A ,_A = label_list[2], label_list[1] _A = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_A = cached_features_file + '''.lock''' with FileLock(__A ): if os.path.exists(__A ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) _A = torch.load(__A ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) _A = ( processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A ) ) logger.info('''Training examples: %s''' , len(__A ) ) _A = hans_convert_examples_to_features(__A , __A , __A , __A ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(self.features , __A ) def __len__( self: Dict ) -> Any: return len(self.features ) def __getitem__( self: Any , __A: List[Any] ) -> InputFeatures: return self.features[i] def __A ( self: Tuple ) -> Optional[Any]: return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = 42 def __init__( self: Tuple , __A: str , __A: PreTrainedTokenizer , __A: str , __A: Optional[int] = 1_28 , __A: Dict=False , __A: bool = False , ) -> Optional[int]: _A = hans_processors[task]() _A = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _A ,_A = label_list[2], label_list[1] _A = label_list _A = processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A ) _A = hans_convert_examples_to_features(__A , __A , __A , __A ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(__A )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) _A = tf.data.Dataset.from_generator( __A , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, 
'''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def __A ( self: int ) -> Dict: return self.dataset def __len__( self: List[str] ) -> str: return len(self.features ) def __getitem__( self: int , __A: Optional[Any] ) -> InputFeatures: return self.features[i] def __A ( self: List[str] ) -> int: return self.label_list class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Tuple , __A: List[str] ) -> str: return self._create_examples(self._read_tsv(os.path.join(__A , '''heuristics_train_set.txt''' ) ) , '''train''' ) def __A ( self: Any , __A: str ) -> int: return self._create_examples(self._read_tsv(os.path.join(__A , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def __A ( self: Tuple ) -> Any: return ["contradiction", "entailment", "neutral"] def __A ( self: List[str] , __A: str , __A: Any ) -> Any: _A = [] for i, line in enumerate(__A ): if i == 0: continue _A = '''%s-%s''' % (set_type, line[0]) _A = line[5] _A = line[6] _A = line[7][2:] if line[7].startswith('''ex''' ) else line[7] _A = line[0] examples.append(InputExample(guid=__A , text_a=__A , text_b=__A , label=__A , pairID=__A ) ) return examples def __A ( _lowercase , _lowercase , _lowercase , _lowercase , ): '''simple docstring''' _A = {label: i for i, label in enumerate(_lowercase )} _A = [] for ex_index, example in tqdm.tqdm(enumerate(_lowercase ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: logger.info('''Writing example %d''' % (ex_index) ) _A = tokenizer( example.text_a , example.text_b , add_special_tokens=_lowercase , max_length=_lowercase , padding='''max_length''' , truncation=_lowercase , return_overflowing_tokens=_lowercase , ) _A = label_map[example.label] if example.label in 
label_map else 0 _A = int(example.pairID ) features.append(InputFeatures(**_lowercase , label=_lowercase , pairID=_lowercase ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features __A = { 'hans': 3, } __A = { 'hans': HansProcessor, }
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert 
tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
import os

# Restrict star-imports to the public entry point.
__all__ = ["__A"]


def __A(_lowercase: str = "input.txt") -> int:
    """Solve Project Euler 82: minimal path sum across an 80x80 grid.

    Reads a comma-separated integer matrix from *_lowercase* and returns the
    minimal sum of a path that starts anywhere in the left column, ends
    anywhere in the right column, and moves only up, down and right.

    Note: the path is resolved relative to the current working directory when
    *_lowercase* is a bare filename (an absolute path is used as-is).
    """
    with open(os.path.join(os.path.dirname(_lowercase), _lowercase)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    # minimal_path_sums[i][j] = cheapest way to reach cell (i, j) from column 0.
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # Enter column j from the left...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax moving downward...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # ...and upward, so both vertical directions are allowed.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    # Best exit anywhere in the last column.
    return min(row[-1] for row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{__A() = }")
62
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} 
return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
1
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    """Extract the pet-breed label from a filename like `.../beagle_32.jpg` -> `beagle`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    """Dataset of pet images whose label is encoded in the file name."""

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    """Fine-tune a frozen resnet50d classifier head on the pets dataset with `accelerate`."""
    # Initialize accelerator (optionally with experiment tracking).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [
        os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")
    ]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation (80/20)
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model; only the classifier head is trained.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(
        optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)
    )

    # Prepare everything. There is no specific order to remember, we just need to unpack the
    # objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI arguments and launch the training loop."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
62
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    """`Trainer` subclass for QA with quantization support (calibration + ONNX export)."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a non-shuffled dataloader used only for quantizer calibration."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        """Run up to `self.calib_num` samples through the model to collect quantizer statistics."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with post-processing of raw predictions before metric computation."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on a test set, post-process, and return a `PredictionOutput`."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to ONNX using one evaluation batch as sample input."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
62
1
def find_minimum_change(denominations, value):
    """Greedily make change for `value` using `denominations` (given in ascending order).

    Returns the list of coins used, largest denomination first. `value` may be an
    int or a numeric string; it is converted with `int()`.
    """
    total_value = int(value)

    # Collected coins/notes making up the change.
    answer = []

    # Traverse denominations from largest to smallest.
    for denomination in reversed(denominations):
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
62
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazily-imported public API of the MEGA model package.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

# The modeling objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Install a lazy module so submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
1
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python SHA-256 (FIPS 180-4).

    The final hex digest is available as the `hash` attribute after construction.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes).
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message: append 0x80, zeros, then the 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block and set `self.hash`."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Check the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a CLI-provided string (or file contents) and print the digest."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq, size):
    """Yield consecutive tuples of `size` items from `seq` (last one may be shorter)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty):
    """Upper-case the letters of `dirty`, insert 'X' between doubled letters,
    and pad with a trailing 'X' so the length is even."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key):
    """Build the 5x5 Playfair table (as a flat 25-char list) from `key`.

    'J' is folded into 'I', per the classic cipher.
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext, key):
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext, key):
    """Decrypt `ciphertext` with the Playfair cipher under `key`.

    Note: inserted 'X' padding from `prepare_input` is NOT removed.
    """
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
62
1
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield repo-relative paths of all .py/.ipynb files, skipping scripts/,
    hidden directories, and __init__.py files."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into excluded directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Markdown list prefix for nesting level `i` (level 0 is a `##` heading)."""
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headings/bullets for each path component new in `new_path` vs `old_path`."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print a DIRECTORY.md-style index of all good files under `top_dir`."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
62
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs/inputs and runs shape checks for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)

        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
62
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def __A ( _lowercase ): '''simple docstring''' _A = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: _A = [1_44, 1_92, 2_40] _A = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: _A = [96, 1_20, 1_44] _A = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: _A = [64, 80, 96] _A = [16, 16, 24, 48, 64, 80, 3_20] _A = 0.05 _A = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): _A = 5_12 _A = 16 _A = 21 _A = '''pascal-voc-id2label.json''' else: _A = 10_00 _A = '''imagenet-1k-id2label.json''' _A = '''huggingface/label-files''' _A = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) ) _A = {int(_lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} return config def __A ( _lowercase , _lowercase=False ): '''simple docstring''' for i in range(1 , 6 ): if f"""layer_{i}.""" in name: _A = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: _A = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: _A = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: _A = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: _A = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: _A = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: _A = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: _A = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." 
in name: _A = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." in name: _A = name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if f""".{i}.{j}.""" in name: _A = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if f""".{i}.{j}.""" in name: _A = name.replace(f""".{i}.{j}.""" , f""".{i}.""" ) if "expand_1x1" in name: _A = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: _A = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: _A = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if f""".global_rep.{i}.weight""" in name: _A = name.replace(f""".global_rep.{i}.weight""" , '''.layernorm.weight''' ) if f""".global_rep.{i}.bias""" in name: _A = name.replace(f""".global_rep.{i}.bias""" , '''.layernorm.bias''' ) if ".global_rep." in name: _A = name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: _A = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: _A = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: _A = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: _A = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." in name: _A = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: _A = name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: _A = name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: _A = name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: _A = name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." 
in name: _A = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: _A = name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." not in name): _A = '''mobilevit.''' + name return name def __A ( _lowercase , _lowercase , _lowercase=False ): '''simple docstring''' if base_model: _A = '''''' else: _A = '''mobilevit.''' for key in orig_state_dict.copy().keys(): _A = orig_state_dict.pop(_lowercase ) if key[:8] == "encoder.": _A = key[8:] if "qkv" in key: _A = key.split('''.''' ) _A = int(key_split[0][6:] ) - 1 _A = int(key_split[3] ) _A = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" ) _A = layer.transformer.layer[transformer_num].attention.attention.all_head_size _A = ( f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: _A = val[:dim, :] _A = val[dim : dim * 2, :] _A = val[-dim:, :] else: _A = val[:dim] _A = val[dim : dim * 2] _A = val[-dim:] else: _A = val return orig_state_dict def __A ( ): '''simple docstring''' _A = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _A = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im @torch.no_grad() def __A ( _lowercase , _lowercase , _lowercase , _lowercase=False ): '''simple docstring''' _A = get_mobilevit_config(_lowercase ) # load original state_dict _A = torch.load(_lowercase , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): _A = MobileViTForSemanticSegmentation(_lowercase ).eval() else: _A = MobileViTForImageClassification(_lowercase ).eval() _A = convert_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase ) # Check outputs on an image, prepared by MobileViTImageProcessor _A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _A = image_processor(images=prepare_img() , 
return_tensors='''pt''' ) _A = model(**_lowercase ) _A = outputs.logits if mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": _A = torch.tensor( [ [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]], [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]], [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": _A = torch.tensor( [ [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]], [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]], [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": _A = torch.tensor( [ [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]], [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]], [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]], ] ) else: raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": _A = torch.tensor([-0.98_66, 0.23_92, -1.12_41] ) elif mobilevit_name == "mobilevit_xs": _A = torch.tensor([-2.47_61, -0.93_99, -1.95_87] ) elif mobilevit_name == "mobilevit_xxs": _A = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ) else: raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) 
print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowercase ) if push_to_hub: _A = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) _A = model_mapping[mobilevit_name] image_processor.push_to_hub(_lowercase , organization='''apple''' ) model.push_to_hub(_lowercase , organization='''apple''' ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
62
# Two small directed graphs used by the demo / tests:
# vertex -> list of successor vertices.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Return the DFS post-order (increasing finish time) of the part of
    ``graph`` reachable from ``vert``.

    ``visited`` is mutated in place so repeated calls share state.
    This is the first pass of Kosaraju's algorithm.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every unvisited vertex reachable from ``vert`` in the
    reversed graph.

    When vertices are taken in decreasing finish time, this set is exactly
    one strongly connected component.  ``visited`` is mutated in place.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Return the strongly connected components of ``graph`` (Kosaraju).

    Vertices must be labelled 0..n-1.  Two DFS passes: one on the graph to
    order vertices by finish time, one on the transpose in decreasing
    finish-time order, peeling off one component per DFS tree.
    """
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph: every edge u -> v becomes v -> u.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    # Process vertices in decreasing finish time on the reversed graph.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
62
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __A = { 'vocab_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model', }, 'tokenizer_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json', }, } __A = { 'google/fnet-base': 512, 'google/fnet-large': 512, } __A = '▁' class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["input_ids", "token_type_ids"] A_ = FNetTokenizer def __init__( self: Optional[int] , __A: str=None , __A: Optional[Any]=None , __A: Dict=False , __A: Dict=True , __A: Optional[int]=True , __A: Optional[Any]="<unk>" , __A: Any="[SEP]" , __A: Dict="<pad>" , __A: Tuple="[CLS]" , __A: List[str]="[MASK]" , **__A: Any , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_A = ( AddedToken(__A , lstrip=__A , rstrip=__A , normalized=__A ) if isinstance(__A , __A ) else mask_token ) super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) _A = do_lower_case _A = remove_space _A = keep_accents _A = vocab_file _A = False if not self.vocab_file else True def __A ( self: int , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self: List[Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self: Dict , __A: str , __A: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _A = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
62
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack.

    Returns the best value achievable using the first ``i`` items with
    remaining capacity ``j``.  Relies on a module-level table ``f``
    pre-filled with -1 meaning "not yet computed" (see the ``__main__``
    demo below); computed entries are cached back into ``f``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i cannot fit: value is the same as without it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns ``(optimal_value, dp)`` where ``dp[i][c]`` is the best value
    achievable with the first ``i`` items and capacity ``c``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with w (not the leftover loop variable) so w == 0 also works.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal item subset.

    Returns ``(optimal_value, optimal_subset)``; the subset holds 1-based
    item indices.  Raises ``ValueError`` when the inputs are not sequences
    of equal length and ``TypeError`` when a weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)

    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards from (i, j), adding each taken item's
    1-based index to ``optimal_set``."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Value unchanged without item i -> item i was not taken.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
62
1
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self: List[Any] , __A: Any ) -> Dict: _A = parent def __A ( self: Union[str, Any] ) -> List[Any]: return {} def __A ( ): '''simple docstring''' _A = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' _A = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = MarkupLMFeatureExtractor if is_bsa_available() else None def __A ( self: Optional[int] ) -> int: _A = MarkupLMFeatureExtractionTester(self ) @property def __A ( self: Optional[Any] ) -> int: return self.feature_extract_tester.prepare_feat_extract_dict() def __A ( self: List[Any] ) -> Optional[int]: # Initialize feature_extractor _A = self.feature_extraction_class() # Test not batched input _A = get_html_strings()[0] _A = feature_extractor(__A ) # fmt: off _A = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. 
For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] _A = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , __A ) self.assertEqual(encoding.xpaths , __A ) # Test batched _A = get_html_strings() _A = feature_extractor(__A ) # fmt: off _A = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] _A = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , __A ) self.assertEqual(encoding.xpaths , __A )
62
def solution(n: int = 1_000_000) -> int:
    """Return the starting number below ``n`` with the longest Collatz chain.

    Project Euler problem 14.  Chain lengths are memoized in ``counters``
    so each value's tail is only walked once across all starting points.
    """
    largest_number = 1
    pre_counter = 1  # length of the longest chain found so far
    counters = {1: 1}  # start value -> chain length (memo table)

    for start in range(2, n):
        counter = 0
        number = start

        while True:
            if number in counters:
                # The rest of this chain is already known; reuse it.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
62
1
import enum
import shutil
import sys

# Width of the attached terminal (shutil falls back to 80 when not a tty).
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# Final letter of the ANSI CSI escape sequence for each cursor direction.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write ``content`` (plus ``end``) to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write ``content`` wrapped in the ANSI SGR escape for ``color``."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line (carriage return)."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor ``num_lines`` cells in ``direction``.

    ``direction`` is one of 'UP'/'DOWN'/'RIGHT'/'LEFT' (case-insensitive).
    """
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the full terminal width."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
62
def text_justification(word: str, max_width: int) -> list:
    """Greedily fully-justify ``word`` into lines of exactly ``max_width``.

    Extra spaces in each line are distributed left-to-right between words
    (round robin); the last line of the paragraph is left-justified and
    padded with trailing spaces.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for current_word in words:  # renamed: do not shadow the ``word`` parameter
        if width + len(current_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(line) = minimum number of spaces needed between words
            line.append(current_word)
            width += len(current_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [current_word], len(current_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
62
1
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one argument must be 0; it marks the unknown to compute.
    Returns a one-entry dict mapping the unknown's name to its value.

    Raises:
        ValueError: if zero or more than one argument is 0, or if any
            argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) 
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) 
with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
62
1
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = KandinskyVaaPipeline A_ = [ "image_embeds", "negative_image_embeds", ] A_ = ["image_embeds", "negative_image_embeds"] A_ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A_ = False @property def __A ( self: Any ) -> Dict: return 32 @property def __A ( self: Optional[Any] ) -> List[Any]: return 32 @property def __A ( self: List[str] ) -> List[Any]: return self.time_input_dim @property def __A ( self: str ) -> List[str]: return self.time_input_dim * 4 @property def __A ( self: Tuple ) -> Optional[Any]: return 1_00 @property def __A ( self: Union[str, Any] ) -> str: torch.manual_seed(0 ) _A = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': 
'''scale_shift''', '''class_embed_type''': None, } _A = UNetaDConditionModel(**__A ) return model @property def __A ( self: Optional[Any] ) -> Optional[int]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) _A = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self: Optional[int] ) -> List[str]: _A = self.dummy_unet _A = self.dummy_movq _A = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__A , ) _A = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __A ( self: Optional[Any] , __A: Optional[int] , __A: Tuple=0 ) -> List[str]: _A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__A ) ).to(__A ) _A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __A ) if str(__A ).startswith('''mps''' ): _A = torch.manual_seed(__A ) else: _A = torch.Generator(device=__A ).manual_seed(__A ) _A = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def __A ( self: int ) -> Any: _A = '''cpu''' _A = self.get_dummy_components() _A = self.pipeline_class(**__A ) _A = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = pipe(**self.get_dummy_inputs(__A ) ) _A = output.images _A = pipe( **self.get_dummy_inputs(__A ) , return_dict=__A , )[0] _A = 
image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A = np.array( [0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Optional[int] ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: List[Any] ) -> int: _A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) _A = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__A ) _A = KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) _A = pipeline.to(__A ) pipeline.set_progress_bar_config(disable=__A ) _A = '''red cat, 4k photo''' _A = torch.Generator(device='''cuda''' ).manual_seed(0 ) _A ,_A = pipe_prior( __A , generator=__A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() _A = torch.Generator(device='''cuda''' ).manual_seed(0 ) _A = pipeline( image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=1_00 , output_type='''np''' , ) _A = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(__A , __A )
62
import math


def sieve(n):
    """Return all primes <= ``n`` using a segmented Sieve of Eratosthenes.

    A classic sieve finds the base primes up to sqrt(n); each subsequent
    segment of size ~sqrt(n) is then sieved with those base primes, keeping
    memory at O(sqrt(n)) instead of O(n).

    :param n: upper bound (inclusive) of the search range
    :return: list of primes in ascending order; empty for n < 2
    """
    if n < 2:  # no primes below 2; also avoids math.sqrt on negative input
        return []
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Plain sieve over the first segment [2, sqrt(n)] -> base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve each following segment [low, high] with the base primes only.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that is >= low.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    # Guarded so importing this module stays side-effect free.
    print(sieve(10**6))
62
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule -> public names, consumed lazily by _LazyModule below.
# (Fix: this dict was previously bound to a throwaway name while
# `_import_structure` was passed to _LazyModule undefined.)
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: expose only config + tokenizer
else:
    # Fix: the modeling names were never attached to the import structure.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the module
    # is replaced by a _LazyModule that resolves names on first access.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; it was
    # previously assigned to an unused name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
import flax.linen as nn
import jax
import jax.numpy as jnp

# NOTE(review): this chunk appears machine-obfuscated -- every assignment
# target was rewritten to `_A`, dataclass fields to `A_`, and setup methods
# to `__A`, so the modules cannot work as written (e.g. `setup` never binds
# `self.conv`, and the last `__call__` declares duplicate `__A` parameters,
# which is a SyntaxError). Comments below describe the apparent intent;
# confirm against the upstream flax UNet blocks before relying on them.


class SCREAMING_SNAKE_CASE(nn.Module):
    """Upsampling block: 2x nearest-neighbour resize followed by a 3x3 conv."""

    A_ = 42  # presumably `out_channels: int` -- TODO confirm
    A_ = jnp.floataa  # presumably the compute dtype (jnp.float32, mangled)

    def __A(self: Tuple) -> Tuple:
        # Presumably flax `setup`: should bind this conv to `self.conv`,
        # but the obfuscated `_A` target discards it.
        _A = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self: Dict, __A: Dict) -> Tuple:
        # 4-way unpack suggests NHWC input: (batch, height, width, channels).
        _A, _A, _A, _A = hidden_states.shape
        # Double both spatial dims with nearest-neighbour resizing...
        _A = jax.image.resize(
            __A,
            shape=(batch, height * 2, width * 2, channels),
            method='''nearest''',
        )
        # ...then refine with the 3x3 conv created in setup.
        _A = self.conv(__A)
        return hidden_states


class SCREAMING_SNAKE_CASE(nn.Module):
    """Downsampling block: 2x spatial reduction via a strided 3x3 conv."""

    A_ = 42  # presumably `out_channels: int` -- TODO confirm
    A_ = jnp.floataa  # presumably the compute dtype

    def __A(self: List[str]) -> Tuple:
        # stride (2, 2) halves height and width.
        _A = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self: Union[str, Any], __A: List[Any]) -> Union[str, Any]:
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        _A = self.conv(__A)
        return hidden_states


class SCREAMING_SNAKE_CASE(nn.Module):
    """ResNet block: two (GroupNorm -> swish -> conv) stages with a
    time-embedding injection between them and an optional 1x1 shortcut."""

    A_ = 42  # presumably `in_channels: int`
    A_ = None  # presumably `out_channels` (falls back to in_channels)
    A_ = 0.0  # presumably `dropout_prob`
    A_ = None  # presumably `use_nin_shortcut` override
    A_ = jnp.floataa  # compute dtype

    def __A(self: Dict) -> Dict:
        # out_channels defaults to in_channels when not given.
        _A = self.in_channels if self.out_channels is None else self.out_channels

        _A = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        _A = nn.Conv(
            __A,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Projects the (swish-activated) time embedding to out_channels.
        _A = nn.Dense(__A, dtype=self.dtype)

        _A = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        _A = nn.Dropout(self.dropout_prob)
        _A = nn.Conv(
            __A,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # A 1x1 shortcut is needed when channel counts differ, unless the
        # `use_nin_shortcut` flag explicitly overrides the decision.
        _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        _A = None
        if use_nin_shortcut:
            _A = nn.Conv(
                __A,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding='''VALID''',
                dtype=self.dtype,
            )

    def __call__(self: Dict, __A: List[Any], __A: List[Any], __A: Any=True) -> List[Any]:
        # NOTE(review): duplicate `__A` parameters are a SyntaxError as
        # written; originals were presumably
        # (hidden_states, temb, deterministic=True) -- confirm upstream.
        _A = hidden_states
        _A = self.norma(__A)
        _A = nn.swish(__A)
        _A = self.conva(__A)

        # Add the projected time embedding, broadcast over H and W axes.
        _A = self.time_emb_proj(nn.swish(__A))
        _A = jnp.expand_dims(jnp.expand_dims(__A, 1), 1)
        _A = hidden_states + temb

        _A = self.norma(__A)
        _A = nn.swish(__A)
        _A = self.dropout(__A, __A)
        _A = self.conva(__A)

        if self.conv_shortcut is not None:
            _A = self.conv_shortcut(__A)

        return hidden_states + residual
62
1
# Adjacency list and vertex set of the example DAG. (Fix: these were bound
# to a throwaway name even though topological_sort reads them by name.)
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level ``edges`` graph.

    Appends each vertex to ``sort`` only after all of its descendants,
    then sweeps any vertices unreachable from ``start``.

    :param start: vertex to begin the traversal from
    :param visited: list of vertices already visited (shared across calls)
    :param sort: accumulator for the resulting order (shared across calls)
    :return: ``sort`` with every vertex appended in dependency-last order
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    # Fix: the result was previously discarded while print() referenced an
    # undefined name `sort` (NameError when run as a script).
    sort = topological_sort('a', [], [])
    print(sort)
62
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of ``graph`` or report a cycle.

    :param graph: dict mapping each vertex ``0..V-1`` to a list of its
        outgoing neighbours
    :side effect: prints the order as a list, or ``Cycle exists`` when some
        vertex never reaches indegree 0
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all source vertices (indegree 0).
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        # Removing `vertex` may free up its successors.
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        # Some vertex never reached indegree 0 -> a cycle blocks the ordering.
        print('''Cycle exists''')
    else:
        print(topo)


# Adjacency List of Graph
# (Fix: the graph was bound to a throwaway name and the function was
# defined under a mangled name, so `topological_sort(graph)` raised
# NameError on import.)
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
62
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer

# NOTE(review): machine-obfuscated chunk -- locals were rewritten to `_A`
# and identifiers mangled (`AutoModelForSeqaSeqLM` was presumably
# `AutoModelForSeq2SeqLM`; bare `__A` inside the method presumably stood
# for `torch_device` / boolean flags). The test cannot run as written.


@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Slow integration test: scores google/mt5-small's LM loss on a tiny
    input/label pair against a precomputed reference value."""

    @slow
    def __A(self: str) -> Any:
        # Load model + tokenizer, then score labels "Hi I am" given the
        # input "Hello there".
        _A = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''', return_dict=__A).to(__A)
        _A = AutoTokenizer.from_pretrained('''google/mt5-small''')
        _A = tokenizer('''Hello there''', return_tensors='''pt''').input_ids
        _A = tokenizer('''Hi I am''', return_tensors='''pt''').input_ids
        _A = model(input_ids.to(__A), labels=labels.to(__A)).loss
        # Mean per-token loss scaled by label length -> summed score.
        _A = -(labels.shape[-1] * loss.item())
        _A = -84.9_127  # reference score; compared with tolerance 1e-4
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
62
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput

# NOTE(review): machine-obfuscated chunk -- assignment targets were
# rewritten to `_A`, method names to `__A`, base classes to `snake_case`,
# and e.g. `torch.atan2` / `torch.float32` appear mangled as `torch.atana`
# / `torch.floataa`; the `step`-like method also declares duplicate `__A`
# parameters (a SyntaxError as written). Structure matches diffusers'
# iPNDM scheduler -- confirm against upstream before relying on it.


class SCREAMING_SNAKE_CASE(snake_case, snake_case):
    """Fourth-order improved pseudo linear multistep (iPNDM) scheduler.

    See https://arxiv.org/pdf/2202.09778.pdf, formulas (9), (12), (13)
    and Algorithm 2.
    """

    A_ = 1  # presumably the scheduler `order`

    @register_to_config
    def __init__(self: Any, __A: int = 10_00, __A: Optional[Union[np.ndarray, List[float]]] = None) -> List[str]:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(__A)

        # standard deviation of the initial noise distribution
        _A = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        _A = 4

        # running values
        _A = []

    def __A(self: str, __A: int, __A: Union[str, torch.device] = None) -> int:
        # Presumably `set_timesteps`: precompute betas/alphas/timesteps for
        # the requested number of inference steps and reset the history.
        _A = num_inference_steps
        _A = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        _A = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            _A = torch.tensor(self.config.trained_betas, dtype=torch.floataa)
        else:
            # Cosine-style schedule: betas = sin^2(t*pi/2).
            _A = torch.sin(steps * math.pi / 2) ** 2

        # alphas = sqrt(1 - betas^2), so (alpha, beta) stay on the unit circle.
        _A = (1.0 - self.betas**2) ** 0.5

        _A = (torch.atana(self.betas, self.alphas) / math.pi * 2)[:-1]
        _A = timesteps.to(__A)

        _A = []

    def __A(
        self: Tuple,
        __A: torch.FloatTensor,
        __A: int,
        __A: torch.FloatTensor,
        __A: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        # Presumably `step`: one reverse-diffusion step combining up to four
        # previous model outputs (Adams-Bashforth-style multistep).
        if self.num_inference_steps is None:
            raise ValueError(
                '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler'''
            )

        _A = (self.timesteps == timestep).nonzero().item()
        _A = timestep_index + 1

        # Blend sample and model output at the current (beta, alpha) pair.
        _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(__A)

        # Linear multistep coefficients, chosen by history length (1..4+).
        if len(self.ets) == 1:
            _A = self.ets[-1]
        elif len(self.ets) == 2:
            _A = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        _A = self._get_prev_sample(__A, __A, __A, __A)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=__A)

    def __A(self: Optional[int], __A: torch.FloatTensor, *__A: Tuple, **__A: List[Any]) -> torch.FloatTensor:
        # Presumably `scale_model_input`: identity for this scheduler.
        return sample

    def __A(self: List[str], __A: Optional[Any], __A: Optional[Any], __A: Any, __A: List[Any]) -> List[Any]:
        # Presumably `_get_prev_sample`: invert the blend at the current
        # index, then re-blend with the next step's (alpha, sigma).
        _A = self.alphas[timestep_index]
        _A = self.betas[timestep_index]

        _A = self.alphas[prev_timestep_index]
        _A = self.betas[prev_timestep_index]

        # max(..., 1e-8) guards the division when alpha approaches zero.
        _A = (sample - sigma * ets) / max(__A, 1e-8)
        _A = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self: List[str]) -> Dict:
        return self.config.num_train_timesteps
62
1
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Dict , __A: Union[str, Any] , __A: Tuple=13 , __A: List[Any]=7 , __A: List[Any]=True , __A: int=True , __A: Any=True , __A: Union[str, Any]=True , __A: Dict=99 , __A: Union[str, Any]=16 , __A: Any=36 , __A: List[Any]=6 , __A: List[Any]=6 , __A: Tuple=6 , __A: List[str]=37 , __A: List[Any]="gelu" , __A: List[str]=0.1 , __A: Tuple=0.1 , __A: Union[str, Any]=5_12 , __A: int=16 , __A: int=2 , __A: Optional[Any]=0.02 , __A: List[Any]=3 , __A: Dict=4 , __A: Dict=None , ) -> Dict: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = embedding_size _A = hidden_size _A = num_hidden_layers _A = num_hidden_groups _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def __A ( self: int ) -> str: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = 
None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self: Optional[int] ) -> int: return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __A ( self: int , __A: Optional[int] , __A: str , __A: int , __A: Optional[int] , __A: Dict , __A: Optional[Any] , __A: Optional[Any] ) -> Dict: _A = AlbertModel(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A ) _A = model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self: Optional[Any] , __A: str , __A: Union[str, Any] , __A: Union[str, Any] , __A: str , __A: Union[str, Any] , __A: str , __A: Dict ) -> str: _A = AlbertForPreTraining(config=__A ) model.to(__A ) model.eval() _A = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __A ( self: Optional[Any] , __A: Dict , __A: List[Any] , __A: List[str] , __A: Dict , __A: int , __A: Optional[int] , __A: Dict ) -> Union[str, Any]: _A = AlbertForMaskedLM(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: str , __A: int , __A: Any , __A: Optional[int] , __A: Union[str, Any] , __A: Dict , __A: int , __A: int ) -> Dict: _A = AlbertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() _A = model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self: Any , __A: Any , __A: int , __A: Optional[Any] , __A: List[str] , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> Union[str, Any]: _A = self.num_labels _A = AlbertForSequenceClassification(__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self: List[Any] , __A: List[Any] , __A: int , __A: Union[str, Any] , __A: List[str] , __A: Tuple , __A: List[str] , __A: List[Any] ) -> Union[str, Any]: _A = self.num_labels _A = AlbertForTokenClassification(config=__A ) model.to(__A ) model.eval() _A = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self: int , __A: Any , __A: Optional[Any] , __A: str , __A: List[str] , __A: List[Any] , __A: Any , __A: List[Any] ) -> Tuple: _A = self.num_choices _A = 
AlbertForMultipleChoice(config=__A ) model.to(__A ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self: List[str] ) -> str: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A_ = True def __A ( self: str , __A: Any , __A: int , __A: int=False ) -> int: _A = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class in get_values(__A ): _A = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A ) _A = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def __A ( self: Any ) -> List[Any]: _A = AlbertModelTester(self ) _A = ConfigTester(self , config_class=__A , hidden_size=37 ) def __A ( self: Tuple ) -> int: 
self.config_tester.run_common_tests() def __A ( self: List[Any] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: Union[str, Any] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def __A ( self: Any ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__A ) def __A ( self: Dict ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__A ) def __A ( self: Optional[Any] ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__A ) def __A ( self: List[Any] ) -> Tuple: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__A ) def __A ( self: Union[str, Any] ) -> str: _A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(*__A ) @slow def __A ( self: Optional[int] ) -> Union[str, Any]: for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AlbertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def __A ( self: Union[str, Any] ) -> Optional[Any]: _A = AlbertModel.from_pretrained('''albert-base-v2''' ) _A = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(__A , attention_mask=__A )[0] _A = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __A ) _A = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
62
def depth_first_search(grid, row, col, visit):
    """Count simple paths from ``(row, col)`` to the bottom-right cell of ``grid``.

    Moves are up/down/left/right; cells containing ``1`` are walls, and a
    path never revisits a cell (``visit`` holds the cells on the current
    path and is restored on backtrack).

    :param grid: rectangular matrix of 0 (open) / 1 (blocked) cells
    :param row: current row index
    :param col: current column index
    :param visit: set of (row, col) pairs already on the current path
    :return: number of distinct simple paths to the bottom-right cell

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    >>> depth_first_search([[0, 1], [0, 0]], 0, 0, set())
    1
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0  # off the board, already on this path, or blocked
    if row == row_length - 1 and col == col_length - 1:
        return 1  # reached the target cell

    visit.add((row, col))

    count = 0
    # Fix: the recursive calls referenced `depth_first_search` while the
    # function itself was defined under a mangled name (NameError).
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))  # backtrack so sibling paths may use this cell
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt') def __A ( _lowercase , _lowercase , _lowercase = 1_60_00 ): '''simple docstring''' _A = int(round(sample_rate * max_length ) ) if len(_lowercase ) <= sample_length: return wav _A = randint(0 , len(_lowercase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = field(default=snake_case , metadata={"help": "Name of a dataset from the datasets package"} ) A_ = field( default=snake_case , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) A_ = field( default=snake_case , metadata={"help": "A file containing the training audio paths and labels."} ) A_ = field( default=snake_case , metadata={"help": "A file containing the validation audio paths and labels."} ) A_ = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) A_ = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). 
Defaults to 'validation'" ) } , ) A_ = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , ) A_ = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) A_ = field( default=snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) A_ = field( default=snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) A_ = field( default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" A_ = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) A_ = field( default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A_ = field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) A_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) A_ = field( default=snake_case , metadata={"help": "Name or path of preprocessor config."} ) A_ = field( default=snake_case , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) A_ = field( default=snake_case , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) A_ = field( default=snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) A_ = field( default=snake_case , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) A_ = field( default=snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def __A ( self: Optional[Any] ) -> List[Any]: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __A , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def __A ( ): '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A ,_A ,_A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A ,_A ,_A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _lowercase , _lowercase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_lowercase ) transformers.utils.logging.set_verbosity(_lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. 
_A = DatasetDict() _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """ '''Make sure to set `--label_column_name` to the correct text column - one of ''' f"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _A = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_A = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _A = feature_extractor.model_input_names[0] def train_transforms(_lowercase ): _A = [] for audio in batch[data_args.audio_column_name]: _A = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_lowercase ) _A = feature_extractor(_lowercase , sampling_rate=feature_extractor.sampling_rate ) _A = {model_input_name: inputs.get(_lowercase )} _A = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_lowercase ): _A = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _A = feature_extractor(_lowercase , sampling_rate=feature_extractor.sampling_rate ) _A = {model_input_name: inputs.get(_lowercase )} _A = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _A = raw_datasets['''train'''].features[data_args.label_column_name].names _A ,_A = {}, {} for i, label in enumerate(_lowercase ): _A = str(_lowercase ) _A = label # Load the accuracy metric from the datasets package _A = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_lowercase ): _A = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowercase , references=eval_pred.label_ids ) _A = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowercase ) , labelaid=_lowercase , idalabel=_lowercase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _A = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowercase , output_all_columns=_lowercase ) if training_args.do_eval: if data_args.max_eval_samples is not None: _A = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowercase , output_all_columns=_lowercase ) # Initialize our trainer _A = Trainer( model=_lowercase , args=_lowercase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif 
last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_lowercase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('''eval''' , _lowercase ) trainer.save_metrics('''eval''' , _lowercase ) # Write model card and (optionally) push to hub _A = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_lowercase ) else: trainer.create_model_card(**_lowercase ) if __name__ == "__main__": main()
62
# Obfuscated copy of `transformers` `hf_argparser.py`: an argparse subclass that
# builds CLI arguments from dataclass field definitions.
# NOTE(review): this block is machine-mangled — distinct locals were all renamed
# to `_A`, parameters to `_lowercase`/`__A`.  That leaves duplicate parameter
# names (a SyntaxError in Python) and reads of names that are never bound
# (`v`, `choices`, `kwargs`, `field`, ...).  The reads reveal the intended
# original names; those must be restored before this code can run.
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml

# NOTE(review): two different module constants (`DataClass`, `DataClassType`)
# were both renamed to `__A`, so the second assignment clobbers the first.
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)


def __A(_lowercase):
    """Parse a truthy CLI string ("yes"/"no", "true"/"false", ...) into a bool.

    Passes booleans through unchanged; raises ``ArgumentTypeError`` otherwise.
    NOTE(review): the body reads `v`, which was evidently the original
    parameter name before mangling.
    """
    if isinstance(_lowercase, _lowercase):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )


def __A(_lowercase):
    """Return a converter mapping a CLI string back to one of `choices`.

    NOTE(review): the body reads `choices` and `str_to_choice` — the mangled
    assignments to `_A` were evidently `str_to_choice = {...}` originally.
    """
    _A = {str(_lowercase): choice for choice in choices}
    return lambda _lowercase: str_to_choice.get(_lowercase, _lowercase)


# Convenience wrapper around `dataclasses.field` that stores `aliases`/`help`
# in the field metadata (keyword-only arguments in the original source).
# NOTE(review): all five keyword-only parameters share the mangled name
# `_lowercase` — this is a SyntaxError; original names were presumably
# (aliases, help, default, default_factory, metadata).
def __A(*, _lowercase=None, _lowercase=None, _lowercase=dataclasses.MISSING, _lowercase=dataclasses.MISSING, _lowercase=None, **_lowercase):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        _A = {}
    if aliases is not None:
        _A = aliases
    if help is not None:
        _A = help
    return dataclasses.field(metadata=_lowercase, default=_lowercase, default_factory=_lowercase, **_lowercase)


class SCREAMING_SNAKE_CASE(snake_case):
    """ArgumentParser subclass that derives its arguments from dataclass fields.

    NOTE(review): the base class `snake_case` is itself a mangled name
    (presumably `argparse.ArgumentParser`); all methods below were renamed to
    the single identifier `__A`, so later definitions shadow earlier ones.
    """
    # NOTE(review): was presumably an `Iterable[DataClassType]` class
    # attribute/annotation before mangling.
    A_ = 42

    def __init__(self: Optional[Any], __A: Union[DataClassType, Iterable[DataClassType]], **__A: List[Any]) -> str:
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            _A = ArgumentDefaultsHelpFormatter
        super().__init__(**__A)
        # Accept a single dataclass type or an iterable of them.
        if dataclasses.is_dataclass(__A):
            _A = [dataclass_types]
        _A = list(__A)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(__A)

    @staticmethod
    def __A(__A: ArgumentParser, __A: dataclasses.Field) -> str:
        """Register one dataclass field as an argparse argument on `parser`."""
        _A = f"""--{field.name}"""
        _A = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, __A):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default'''
            )
        _A = kwargs.pop('''aliases''', [])
        if isinstance(__A, __A):
            _A = [aliases]
        _A = getattr(field.type, '''__origin__''', field.type)
        # Only Optional[X] (Union[X, None]) is supported among Unions.
        if origin_type is Union or (hasattr(__A, '''UnionType''') and isinstance(__A, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(__A) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f""" Problem encountered in field '{field.name}'."""
                )
            if type(__A) not in field.type.__args__:
                # filter `str` in Union
                _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                _A = getattr(field.type, '''__origin__''', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                _A = (
                    field.type.__args__[0] if isinstance(__A, field.type.__args__[1]) else field.type.__args__[1]
                )
                _A = getattr(field.type, '''__origin__''', field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        _A = {}
        if origin_type is Literal or (isinstance(field.type, __A) and issubclass(field.type, __A)):
            # Literal types and Enums become `choices=` arguments.
            if origin_type is Literal:
                _A = field.type.__args__
            else:
                _A = [x.value for x in field.type]
            _A = make_choice_type_function(kwargs['''choices'''])
            if field.default is not dataclasses.MISSING:
                _A = field.default
            else:
                _A = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            _A = copy(__A)
            # Hack because type=bool in argparse does not behave as we want.
            _A = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                _A = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                _A = default
                # This tells argparse we accept 0 or 1 value after --field_name
                _A = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                _A = True
        elif isclass(__A) and issubclass(__A, __A):
            # List fields: repeatable argument (nargs='+').
            _A = field.type.__args__[0]
            _A = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                _A = field.default_factory()
            elif field.default is dataclasses.MISSING:
                _A = True
        else:
            # Plain scalar field.
            _A = field.type
            if field.default is not dataclasses.MISSING:
                _A = field.default
            elif field.default_factory is not dataclasses.MISSING:
                _A = field.default_factory()
            else:
                _A = True
        parser.add_argument(__A, *__A, **__A)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            _A = False
            parser.add_argument(f"""--no_{field.name}""", action='''store_false''', dest=field.name, **__A)

    def __A(self: Dict, __A: DataClassType) -> List[Any]:
        """Add every `init=True` field of dataclass `dtype` to this parser."""
        if hasattr(__A, '''_argument_group_name'''):
            _A = self.add_argument_group(dtype._argument_group_name)
        else:
            _A = self
        try:
            _A = get_type_hints(__A)
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)'''
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A):
                _A = '''.'''.join(map(__A, sys.version_info[:3]))
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.'''
                ) from ex
            raise
        for field in dataclasses.fields(__A):
            if not field.init:
                continue
            _A = type_hints[field.name]
            self._parse_dataclass_field(__A, __A)

    def __A(self: int, __A: Any=None, __A: int=False, __A: Any=True, __A: Optional[Any]=None, __A: Any=None) -> Tuple[DataClass, ...]:
        """Parse CLI args (optionally merged with `.args` files) into dataclass instances.

        NOTE(review): body reads `args_file_flag`, `args_filename`,
        `look_for_args_file`, `args`, `return_remaining_strings` — the
        evident original parameter names.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            _A = []
            if args_filename:
                args_files.append(Path(__A))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('''.args'''))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                _A = ArgumentParser()
                args_file_parser.add_argument(__A, type=__A, action='''append''')
                # Use only remaining args for further parsing (remove the args_file_flag)
                _A, _A = args_file_parser.parse_known_args(args=__A)
                _A = vars(__A).get(args_file_flag.lstrip('''-'''), __A)
                if cmd_args_file_paths:
                    args_files.extend([Path(__A) for p in cmd_args_file_paths])
            _A = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            _A = file_args + args if args is not None else file_args + sys.argv[1:]
        _A, _A = self.parse_known_args(args=__A)
        _A = []
        for dtype in self.dataclass_types:
            # Move each namespace entry that matches a dataclass field into
            # an instance of that dataclass.
            _A = {f.name for f in dataclasses.fields(__A) if f.init}
            _A = {k: v for k, v in vars(__A).items() if k in keys}
            for k in keys:
                delattr(__A, __A)
            _A = dtype(**__A)
            outputs.append(__A)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(__A)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""")
            return (*outputs,)

    def __A(self: Tuple, __A: Dict[str, Any], __A: bool = False) -> Tuple[DataClass, ...]:
        """Instantiate the dataclasses from a plain dict instead of the CLI.

        Raises ``ValueError`` on unconsumed keys unless `allow_extra_keys`.
        """
        _A = set(args.keys())
        _A = []
        for dtype in self.dataclass_types:
            _A = {f.name for f in dataclasses.fields(__A) if f.init}
            _A = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            _A = dtype(**__A)
            outputs.append(__A)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A)}""")
        return tuple(__A)

    def __A(self: Tuple, __A: str, __A: bool = False) -> Tuple[DataClass, ...]:
        """Load a JSON file and delegate to ``parse_dict``."""
        with open(Path(__A), encoding='''utf-8''') as open_json_file:
            _A = json.loads(open_json_file.read())
        _A = self.parse_dict(__A, allow_extra_keys=__A)
        return tuple(__A)

    def __A(self: List[Any], __A: str, __A: bool = False) -> Tuple[DataClass, ...]:
        """Load a YAML file (``yaml.safe_load``) and delegate to ``parse_dict``."""
        _A = self.parse_dict(yaml.safe_load(Path(__A).read_text()), allow_extra_keys=__A)
        return tuple(__A)
62
1
# Obfuscated copy of the MaskFormer model configuration from `transformers`.
# NOTE(review): machine-mangled — every `self.<attr> = value` assignment in
# `__init__` was collapsed to `_A = value`, and later code reads the original
# attribute names (`backbone_config`, `decoder_config`, ...).  The original
# names must be restored before this can run.
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig

# Map of pretrained config archives (presumably MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP).
__A = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# NOTE(review): reassigns the same mangled name `__A` — originally a separate
# module-level `logger`.
__A = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE(snake_case):
    """Configuration for MaskFormer: a Swin/ResNet backbone plus a DETR decoder.

    NOTE(review): base class `snake_case` is a mangled name — presumably
    ``PretrainedConfig`` (imported above).  The four ``A_`` class attributes
    below were presumably ``model_type``, ``attribute_map``,
    ``backbones_supported`` and ``decoders_supported`` (the `__init__` body
    reads the latter two by name).
    """
    A_ = "maskformer"
    A_ = {"hidden_size": "mask_feature_size"}
    A_ = ["resnet", "swin"]
    A_ = ["detr"]

    def __init__(self: int, __A: int = 2_56, __A: int = 2_56, __A: float = 0.1, __A: bool = False, __A: Optional[Dict] = None, __A: Optional[Dict] = None, __A: float = 0.02, __A: float = 1.0, __A: float = 1.0, __A: float = 1.0, __A: float = 20.0, __A: Optional[bool] = None, **__A: Optional[Any]) -> List[Any]:
        # Resolve the backbone config, falling back to a default Swin config.
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            _A = SwinConfig(
                image_size=3_84,
                in_channels=3,
                patch_size=4,
                embed_dim=1_28,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''],
            )
        # Accept a plain dict and convert it through CONFIG_MAPPING.
        if isinstance(__A, __A):
            _A = backbone_config.pop('''model_type''')
            _A = CONFIG_MAPPING[backbone_model_type]
            _A = config_class.from_dict(__A)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {",".join(self.backbones_supported)}"""
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            _A = DetrConfig()
        else:
            # verify that the decoder is supported
            _A = (
                decoder_config.pop('''model_type''') if isinstance(__A, __A) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {",".join(self.decoders_supported)}"""
                )
            if isinstance(__A, __A):
                _A = CONFIG_MAPPING[decoder_type]
                _A = config_class.from_dict(__A)
        # NOTE(review): the assignments below were all `self.<name> = <name>`
        # before mangling (the RHS names show the intended attributes).
        _A = backbone_config
        _A = decoder_config
        # main feature dimension for the model
        _A = fpn_feature_size
        _A = mask_feature_size
        # initializer
        _A = init_std
        _A = init_xavier_std
        # Hungarian matcher && loss
        _A = cross_entropy_weight
        _A = dice_weight
        _A = mask_weight
        _A = use_auxiliary_loss
        _A = no_object_weight
        _A = output_auxiliary_logits
        # Mirror decoder attention geometry on the top-level config.
        _A = self.decoder_config.encoder_attention_heads
        _A = self.decoder_config.num_hidden_layers
        super().__init__(**__A)

    @classmethod
    def __A(cls: Dict, __A: PretrainedConfig, __A: PretrainedConfig, **__A: Any) -> int:
        """Alternate constructor from pre-built backbone and decoder configs
        (presumably `from_backbone_and_decoder_configs`)."""
        return cls(
            backbone_config=__A,
            decoder_config=__A,
            **__A,
        )

    def __A(self: int) -> Dict[str, any]:
        """Serialize this config (and its nested sub-configs) to a plain dict."""
        _A = copy.deepcopy(self.__dict__)
        # Nested configs are stored as objects; export them as dicts.
        _A = self.backbone_config.to_dict()
        _A = self.decoder_config.to_dict()
        _A = self.__class__.model_type
        return output
62
# Obfuscated copy of the MaskFormer model test suite from `transformers`.
# NOTE(review): machine-mangled — locals collapsed to `_A`, parameters to
# `__A`, and every method renamed to `__A` (later defs shadow earlier ones).
# The bodies read the evident original names (`parent`, `batch_size`, ...),
# which must be restored before these tests can run.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class SCREAMING_SNAKE_CASE:
    """Helper that builds tiny MaskFormer configs/inputs for the common tests
    (presumably `MaskFormerModelTester` — it is instantiated under that name
    in the test class below)."""

    def __init__(self: Optional[int], __A: Union[str, Any], __A: int=2, __A: List[str]=True, __A: List[Any]=False, __A: Union[str, Any]=10, __A: Optional[int]=3, __A: List[Any]=32 * 4, __A: Dict=32 * 6, __A: Optional[Any]=4, __A: Any=32) -> str:
        # NOTE(review): each line below was `self.<name> = <name>` before mangling.
        _A = parent
        _A = batch_size
        _A = is_training
        _A = use_auxiliary_loss
        _A = num_queries
        _A = num_channels
        _A = min_size
        _A = max_size
        _A = num_labels
        _A = mask_feature_size

    def __A(self: Dict) -> Optional[int]:
        """Build random pixel values/masks/labels plus a config (presumably
        `prepare_config_and_inputs`)."""
        _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(__A)
        _A = torch.ones([self.batch_size, self.min_size, self.max_size], device=__A)
        # Binary per-class masks and binary class presence labels.
        _A = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=__A) > 0.5
        ).float()
        _A = (torch.rand((self.batch_size, self.num_labels), device=__A) > 0.5).long()
        _A = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def __A(self: Optional[Any]) -> Tuple:
        """Return a minimal MaskFormerConfig (presumably `get_config`)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=1_28,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def __A(self: Dict) -> Tuple:
        """Presumably `prepare_config_and_inputs_for_common`."""
        _A, _A, _A, _A, _A = self.prepare_config_and_inputs()
        _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def __A(self: Optional[int], __A: Union[str, Any], __A: Dict) -> int:
        """Check hidden-state list lengths against the config (presumably
        `check_output_hidden_state`).
        NOTE(review): `assertTrue(len(x), y)` ignores its second argument —
        these were probably meant to be `assertEqual`; flagged, not changed."""
        _A = output.encoder_hidden_states
        _A = output.pixel_decoder_hidden_states
        _A = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__A), len(config.backbone_config.depths))
        self.parent.assertTrue(len(__A), len(config.backbone_config.depths))
        self.parent.assertTrue(len(__A), config.decoder_config.decoder_layers)

    def __A(self: Optional[Any], __A: Union[str, Any], __A: Optional[Any], __A: Any, __A: Dict=False) -> Any:
        """Forward MaskFormerModel and validate output shapes (presumably
        `create_and_check_maskformer_model`)."""
        with torch.no_grad():
            _A = MaskFormerModel(config=__A)
            model.to(__A)
            model.eval()
            _A = model(pixel_values=__A, pixel_mask=__A)
            _A = model(__A, output_hidden_states=__A)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(__A, __A)

    def __A(self: Optional[Any], __A: Union[str, Any], __A: Optional[Any], __A: Union[str, Any], __A: Union[str, Any], __A: List[Any]) -> int:
        """Forward the segmentation head with and without labels and validate
        logits/loss shapes (presumably
        `create_and_check_maskformer_instance_segmentation_head_model`)."""
        _A = MaskFormerForInstanceSegmentation(config=__A)
        model.to(__A)
        model.eval()

        def comm_check_on_output(__A: int):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            _A = model(pixel_values=__A, pixel_mask=__A)
            _A = model(__A)
            comm_check_on_output(__A)
            _A = model(
                pixel_values=__A, pixel_mask=__A, mask_labels=__A, class_labels=__A
            )
        comm_check_on_output(__A)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class SCREAMING_SNAKE_CASE(snake_case, snake_case, unittest.TestCase):
    """Common-suite tests for MaskFormer.

    NOTE(review): base classes `snake_case, snake_case` are mangled —
    presumably ``ModelTesterMixin`` and ``PipelineTesterMixin`` (imported
    above).  The ``A_`` attributes were presumably ``all_model_classes``,
    ``pipeline_model_mapping`` and the usual boolean test switches.
    """
    A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    A_ = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    A_ = False
    A_ = False
    A_ = False
    A_ = False

    def __A(self: int) -> Tuple:
        # Presumably setUp: builds the model tester and a config tester.
        _A = MaskFormerModelTester(self)
        _A = ConfigTester(self, config_class=__A, has_text_modality=__A)

    def __A(self: List[Any]) -> Dict:
        self.config_tester.run_common_tests()

    def __A(self: Optional[Any]) -> int:
        _A, _A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A, **__A, output_hidden_states=__A)

    def __A(self: Dict) -> Optional[Any]:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A)

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
    def __A(self: int) -> Tuple:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
    def __A(self: List[Any]) -> Any:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''')
    def __A(self: Union[str, Any]) -> Optional[int]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''')
    def __A(self: int) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`'''
    )
    def __A(self: Union[str, Any]) -> List[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def __A(self: List[Any]) -> Any:
        pass

    def __A(self: Dict) -> Optional[Any]:
        # Check that `forward`'s first argument is `pixel_values`.
        _A, _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = model_class(__A)
            _A = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A = [*signature.parameters.keys()]
            _A = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __A)

    @slow
    def __A(self: int) -> Optional[Any]:
        # Smoke-test loading a pretrained checkpoint from the Hub.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _A = MaskFormerModel.from_pretrained(__A)
            self.assertIsNotNone(__A)

    def __A(self: Optional[Any]) -> Optional[int]:
        # Forward with labels and check the loss is produced.
        _A = (self.model_tester.min_size,) * 2
        _A = {
            '''pixel_values''': torch.randn((2, 3, *size), device=__A),
            '''mask_labels''': torch.randn((2, 10, *size), device=__A),
            '''class_labels''': torch.zeros(2, 10, device=__A).long(),
        }
        _A = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(__A)
        _A = model(**__A)
        self.assertTrue(outputs.loss is not None)

    def __A(self: Optional[Any]) -> List[Any]:
        _A, _A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A, **__A, output_hidden_states=__A)

    def __A(self: Any) -> Tuple:
        # Check attentions are returned when requested.
        _A, _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = model_class(__A).to(__A)
            _A = model(**__A, output_attentions=__A)
            self.assertTrue(outputs.attentions is not None)

    def __A(self: Dict) -> Union[str, Any]:
        # Training smoke test: loss must backpropagate.
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _A = self.all_model_classes[1]
        _A, _A, _A, _A, _A = self.model_tester.prepare_config_and_inputs()
        _A = model_class(__A)
        model.to(__A)
        model.train()
        _A = model(__A, mask_labels=__A, class_labels=__A).loss
        loss.backward()

    def __A(self: Tuple) -> Optional[Any]:
        # Retain grads on intermediate activations and check they are populated
        # after backward.
        # only MaskFormerForInstanceSegmentation has the loss
        _A = self.all_model_classes[1]
        _A, _A, _A, _A, _A = self.model_tester.prepare_config_and_inputs()
        _A = True
        _A = True
        _A = model_class(__A)
        model.to(__A)
        model.train()
        _A = model(__A, mask_labels=__A, class_labels=__A)
        _A = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _A = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        _A = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _A = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__A)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


# Absolute tolerance for the integration-test tensor comparisons below
# (presumably named TOLERANCE before mangling).
__A = 1e-4


def __A():
    """Load the standard COCO test fixture image (presumably `prepare_img` —
    it is called under that name in the integration tests below)."""
    _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_vision
@slow
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Slow integration tests comparing model outputs against recorded values."""

    @cached_property
    def __A(self: Union[str, Any]) -> Optional[int]:
        # Presumably `default_image_processor` (read under that name below).
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
            if is_vision_available()
            else None
        )

    def __A(self: List[Any]) -> Any:
        # Base-model integration test: compare hidden-state slices against
        # recorded reference values.
        _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(__A)
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A, return_tensors='''pt''').to(__A)
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__A, (1, 3, 8_00, 10_88))
        with torch.no_grad():
            _A = model(**__A)
        _A = torch.tensor(
            [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]]
        ).to(__A)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], __A, atol=__A)
        )
        _A = torch.tensor(
            [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]]
        ).to(__A)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __A, atol=__A)
        )
        _A = torch.tensor(
            [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]]
        ).to(__A)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], __A, atol=__A)
        )

    def __A(self: Dict) -> Dict:
        # Segmentation-head integration test (Swin backbone checkpoint).
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(__A)
            .eval()
        )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A, return_tensors='''pt''').to(__A)
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__A, (1, 3, 8_00, 10_88))
        with torch.no_grad():
            _A = model(**__A)
        # masks_queries_logits
        _A = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        _A = [
            [-1.3_737_124, -1.7_724_937, -1.9_364_233],
            [-1.5_977_281, -1.9_867_939, -2.1_523_695],
            [-1.5_795_398, -1.9_269_832, -2.093_942],
        ]
        _A = torch.tensor(__A).to(__A)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __A, atol=__A))
        # class_queries_logits
        _A = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        _A = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ]
        ).to(__A)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __A, atol=__A))

    def __A(self: List[Any]) -> Dict:
        # Segmentation-head integration test (ResNet-101 backbone checkpoint).
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
            .to(__A)
            .eval()
        )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(__A, return_tensors='''pt''').to(__A)
        _A = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__A, (1, 3, 8_00, 10_88))
        with torch.no_grad():
            _A = model(**__A)
        # masks_queries_logits
        _A = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
        _A = torch.tensor(__A).to(__A)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __A, atol=__A))
        # class_queries_logits
        _A = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        _A = torch.tensor(
            [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]]
        ).to(__A)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __A, atol=__A))

    def __A(self: Optional[Any]) -> str:
        # Loss computation integration test with explicit segmentation maps.
        # NOTE(review): `np.floataa` is a mangled dtype name (presumably
        # `np.float32`).
        _A = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(__A)
            .eval()
        )
        _A = self.default_image_processor
        _A = image_processor(
            [np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))],
            segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.floataa), np.zeros((3_84, 3_84)).astype(np.floataa)],
            return_tensors='''pt''',
        )
        _A = inputs['''pixel_values'''].to(__A)
        _A = [el.to(__A) for el in inputs['''mask_labels''']]
        _A = [el.to(__A) for el in inputs['''class_labels''']]
        with torch.no_grad():
            _A = model(**__A)
        self.assertTrue(outputs.loss is not None)
62
1
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


__A = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE :
    """Wrapper pairing a question-encoder tokenizer with a generator tokenizer (RAG-style).

    NOTE(review): machine-mangled source -- every method is named `__A` (later
    defs shadow earlier ones), every assignment target is `_A` (so `__init__`
    never actually stores its arguments), and the last method's signature
    repeats the parameter name `__A` (a SyntaxError).  Code is left
    byte-identical; comments record the apparent original intent.
    """

    def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
        # intended: store question_encoder / generator; default the active
        # tokenizer to the question encoder
        _A = question_encoder
        _A = generator
        _A = self.question_encoder

    def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
        # intended: save_pretrained(save_directory) -- writes both sub-tokenizers
        # into dedicated subfolders
        if os.path.isfile(__A ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(__A , exist_ok=__A )
        _A = os.path.join(__A , '''question_encoder_tokenizer''' )
        _A = os.path.join(__A , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(__A )
        self.generator.save_pretrained(__A )

    @classmethod
    def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
        # intended: from_pretrained -- builds both sub-tokenizers from a RagConfig
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        _A = kwargs.pop('''config''' , __A )
        if config is None:
            _A = RagConfig.from_pretrained(__A )
        _A = AutoTokenizer.from_pretrained(
            __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        _A = AutoTokenizer.from_pretrained(
            __A , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=__A , generator=__A )

    def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
        # delegate to whichever sub-tokenizer is currently active
        return self.current_tokenizer(*__A , **__A )

    def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
        # intended: batch_decode -- delegates to the generator tokenizer
        return self.generator.batch_decode(*__A , **__A )

    def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
        # intended: decode -- delegates to the generator tokenizer
        return self.generator.decode(*__A , **__A )

    def __A ( self: Dict ) -> List[str]:
        # intended: switch the active tokenizer to the question encoder (input mode)
        _A = self.question_encoder

    def __A ( self: Union[str, Any] ) -> int:
        # intended: switch the active tokenizer to the generator (target mode)
        _A = self.generator

    def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
        # intended: deprecated prepare_seq2seq_batch(src_texts, tgt_texts, ...)
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , __A , )
        if max_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
        _A = labels['''input_ids''']
        return model_inputs
62
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


__A = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE :
    """Wrapper pairing a question-encoder tokenizer with a generator tokenizer (RAG-style).

    NOTE(review): machine-mangled source (duplicate of the chunk above) --
    every method is named `__A` (later defs shadow earlier ones), every
    assignment target is `_A`, and the last method's signature repeats the
    parameter name `__A` (a SyntaxError).  Code is left byte-identical;
    comments record the apparent original intent.
    """

    def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
        # intended: store question_encoder / generator; default the active
        # tokenizer to the question encoder
        _A = question_encoder
        _A = generator
        _A = self.question_encoder

    def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
        # intended: save_pretrained(save_directory) -- writes both sub-tokenizers
        if os.path.isfile(__A ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(__A , exist_ok=__A )
        _A = os.path.join(__A , '''question_encoder_tokenizer''' )
        _A = os.path.join(__A , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(__A )
        self.generator.save_pretrained(__A )

    @classmethod
    def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
        # intended: from_pretrained -- builds both sub-tokenizers from a RagConfig
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        _A = kwargs.pop('''config''' , __A )
        if config is None:
            _A = RagConfig.from_pretrained(__A )
        _A = AutoTokenizer.from_pretrained(
            __A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        _A = AutoTokenizer.from_pretrained(
            __A , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=__A , generator=__A )

    def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
        # delegate to whichever sub-tokenizer is currently active
        return self.current_tokenizer(*__A , **__A )

    def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
        # intended: batch_decode -- delegates to the generator tokenizer
        return self.generator.batch_decode(*__A , **__A )

    def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
        # intended: decode -- delegates to the generator tokenizer
        return self.generator.decode(*__A , **__A )

    def __A ( self: Dict ) -> List[str]:
        # intended: switch the active tokenizer to the question encoder (input mode)
        _A = self.question_encoder

    def __A ( self: Union[str, Any] ) -> int:
        # intended: switch the active tokenizer to the generator (target mode)
        _A = self.generator

    def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
        # intended: deprecated prepare_seq2seq_batch(src_texts, tgt_texts, ...)
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , __A , )
        if max_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            _A = self.current_tokenizer.model_max_length
        _A = self(
            text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
        _A = labels['''input_ids''']
        return model_inputs
62
1
import baseaa


def __A ( _lowercase ):
    """Return the Ascii85 encoding of the UTF-8 bytes of ``_lowercase``."""
    # BUG FIX: the original body read `string.encode('utf-8')`, but no name
    # `string` exists in scope -- the parameter is `_lowercase`, so every call
    # raised NameError.  Encode the actual argument instead.
    return baseaa.aaaencode(_lowercase.encode('''utf-8''' ) )


def __A ( _lowercase ):  # noqa: F811 -- shadows the encoder above, as in the original
    """Decode Ascii85 data ``_lowercase`` back to a UTF-8 string."""
    return baseaa.aaadecode(_lowercase ).decode('''utf-8''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
from __future__ import annotations

# Re-exported so the public entry point (an underscore name) is importable
# via `from module import *`.
__all__ = ["__A"]


def __A(left, right, v, key):  # noqa: E741
    """Binary search: smallest index r in (left, right] with ``v[r] >= key``.

    ``v`` must be ascending between ``left`` and ``right``.

    BUG FIX: the original signature repeated the parameter name `_lowercase`
    four times, which is a SyntaxError; the parameters are now named after
    how the body uses them.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


# The next definition re-uses the name `__A`, so keep a private handle on the
# helper before it is shadowed.
_ceil_index = __A


def __A(v):  # noqa: F811 -- intentional shadowing, mirrors the original layout
    """Return the length of the longest strictly increasing subsequence of ``v``.

    O(n log n) patience-sorting variant: ``tail[k]`` holds the smallest
    possible tail value of an increasing subsequence of length ``k + 1``.

    BUG FIX: in the original, every assignment target had been collapsed to a
    throwaway `_A`, so `tail` and the in-place updates never existed
    (NameError); the algorithm is restored below.
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: best length-1 candidate so far
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the first tail >= v[i] to keep tails minimal
            tail[_ceil_index(-1, length - 1, tail, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
import argparse

from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def __A ( _lowercase , _lowercase , _lowercase ):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax transformers checkpoint.

    NOTE(review): machine-mangled source.  The duplicated `_lowercase`
    parameters make this `def` a SyntaxError; every assignment target is `_A`
    (so `config`, `flax_model`, `tax_model`, the per-layer `tax_*` weights,
    etc. are never actually bound); and several referenced names (`txa_*`,
    `convert_tax_checkpoint_to_flax`) are defined nowhere.  Code is left
    byte-identical; comments record the apparent intent.
    """
    # intended: config <- config_name, flax_model <- fresh model, tax_model <- checkpoint
    _A = AutoConfig.from_pretrained(_lowercase )
    _A = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowercase )
    _A = checkpoints.load_tax_checkpoint(_lowercase )
    # v1.1+ checkpoints split the MLP input projection into wi_0 / wi_1
    _A = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    if config.model_type == "t5":
        _A = '''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        _A = '''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _A = '''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        _A = f"""layers_{str(_lowercase )}"""
        # Self-Attention
        _A = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        _A = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        _A = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        _A = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            _A = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        _A = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            _A = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            _A = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            _A = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        _A = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        _A = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        _A = flax_model.params['''encoder''']['''block'''][str(_lowercase )]['''layer''']
        _A = tax_attention_key
        _A = tax_attention_out
        _A = tax_attention_query
        _A = tax_attention_value
        _A = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            _A = tax_global_layer_norm
        if split_mlp_wi:
            _A = tax_mlp_wi_a
            _A = tax_mlp_wi_a
        else:
            _A = tax_mlp_wi
        _A = tax_mlp_wo
        _A = tax_mlp_layer_norm
        _A = flax_model_encoder_layer_block
    # Only for layer 0:
    _A = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    _A = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _A = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        _A = tax_encoder_global_rel_embedding
    # Assigning
    _A = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    _A = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        _A = f"""layers_{str(_lowercase )}"""
        # Self-Attention
        _A = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        _A = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        _A = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        _A = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        _A = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention
        _A = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        _A = tax_enc_dec_attention_module['''key''']['''kernel''']
        _A = tax_enc_dec_attention_module['''out''']['''kernel''']
        _A = tax_enc_dec_attention_module['''query''']['''kernel''']
        _A = tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        _A = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            _A = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            _A = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            _A = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        _A = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        _A = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        _A = flax_model.params['''decoder''']['''block'''][str(_lowercase )]['''layer''']
        _A = tax_attention_key
        _A = tax_attention_out
        _A = tax_attention_query
        _A = tax_attention_value
        _A = tax_pre_attention_layer_norm
        _A = tax_enc_dec_attention_key
        _A = tax_enc_dec_attention_out
        _A = tax_enc_dec_attention_query
        _A = tax_enc_dec_attention_value
        _A = tax_cross_layer_norm
        if split_mlp_wi:
            _A = tax_mlp_wi_a
            _A = tax_mlp_wi_a
        else:
            _A = tax_mlp_wi
        _A = tax_mlp_wo
        # NOTE(review): `txa_mlp_layer_norm` below is undefined (likely a
        # mangling of `tax_mlp_layer_norm`); same for the `txa_*` names later.
        _A = txa_mlp_layer_norm
        _A = flax_model_decoder_layer_block
    # Decoder Normalization
    _A = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    _A = txa_decoder_norm
    # Only for layer 0:
    _A = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    _A = tax_decoder_rel_embedding
    # Token Embeddings
    _A = tax_model['''target''']['''token_embedder''']['''embedding''']
    _A = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        _A = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(_lowercase )
    print('''T5X Model was sucessfully converted!''' )


if __name__ == "__main__":
    # NOTE(review): the parser is assigned to `__A` but used as `parser`/`args`,
    # and the conversion entry point above is named `__A`, not
    # `convert_tax_checkpoint_to_flax` -- further mangling artifacts.
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    __A = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
62
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


__A = logging.getLogger(__name__)


class SCREAMING_SNAKE_CASE ( snake_case ):
    """PyTorch-Lightning module for GLUE sequence classification.

    NOTE(review): machine-mangled source -- methods are all named `__A`
    (later defs shadow earlier ones), assignment targets are collapsed to
    `_A` (so names such as `hparams`, `args`, `features`, `preds` used below
    are never bound), and the static method's signature repeats `__A`
    (a SyntaxError).  Code left byte-identical; comments record intent.
    """

    A_ = "sequence-classification"

    def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
        # accepts either a dict or a Namespace of hyper-parameters
        if type(__A ) == dict:
            _A = Namespace(**__A )
        _A = glue_output_modes[hparams.task]
        _A = glue_tasks_num_labels[hparams.task]
        super().__init__(__A , __A , self.mode )

    def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
        # intended: forward -- delegate to the wrapped transformer model
        return self.model(**__A )

    def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
        # intended: training_step -- build model inputs from the batch and log loss/lr
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A )
        _A = outputs[0]
        _A = self.trainer.lr_schedulers[0]['''scheduler''']
        _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def __A ( self: List[str] ) -> Dict:
        # intended: prepare_data -- featurize train/dev splits and cache to disk
        _A = self.hparams
        _A = processors[args.task]()
        _A = processor.get_labels()
        for mode in ["train", "dev"]:
            _A = self._feature_file(__A )
            if os.path.exists(__A ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , __A )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                _A = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                _A = convert_examples_to_features(
                    __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , __A )
                torch.save(__A , __A )

    def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
        # intended: get_dataloader(mode, batch_size, shuffle) from cached features
        _A = '''dev''' if mode == '''test''' else mode
        _A = self._feature_file(__A )
        logger.info('''Loading features from cached file %s''' , __A )
        _A = torch.load(__A )
        _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            _A = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            _A = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )

    def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
        # intended: validation_step -- forward pass, collect loss/preds/targets
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A )
        _A ,_A = outputs[:2]
        _A = logits.detach().cpu().numpy()
        _A = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __A ( self: str , __A: Dict ) -> tuple:
        # intended: _eval_end -- aggregate step outputs and compute GLUE metrics
        _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            _A = np.argmax(__A , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            _A = np.squeeze(__A )
        _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        _A = [[] for _ in range(out_label_ids.shape[0] )]
        _A = [[] for _ in range(out_label_ids.shape[0] )]
        _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
        _A = dict(results.items() )
        _A = results
        return ret, preds_list, out_label_list

    def __A ( self: Any , __A: list ) -> dict:
        # intended: validation_epoch_end
        _A ,_A ,_A = self._eval_end(__A )
        _A = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __A ( self: int , __A: Union[str, Any] ) -> dict:
        # intended: test_epoch_end
        _A ,_A ,_A = self._eval_end(__A )
        _A = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]:
        # intended: add_model_specific_args(parser, root_dir)
        BaseTransformer.add_model_specific_args(__A , __A )
        parser.add_argument(
            '''--max_seq_length''' ,
            default=1_28 ,
            type=__A ,
            help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser


def __A ( ):
    """CLI entry point: parse args, train a GLUETransformer, optionally predict.

    NOTE(review): references `GLUETransformer` and is invoked below as
    `main()`, neither of which is defined under those names here --
    further mangling artifacts.
    """
    _A = argparse.ArgumentParser()
    add_generic_args(_lowercase , os.getcwd() )
    _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
    _A = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _A = os.path.join(
            '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
        os.makedirs(args.output_dir )
    _A = GLUETransformer(_lowercase )
    _A = generic_train(_lowercase , _lowercase )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) )
        _A = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(_lowercase )


if __name__ == "__main__":
    main()
62
1
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases

# NOTE(review): machine-mangled test module -- all test functions and the two
# check helpers are named `__A` (so `_check_parquet_dataset` /
# `_check_parquet_datasetdict` called below are undefined), parameter names
# are collapsed to `_lowercase` (duplicated, a SyntaxError per def), and
# assignment targets are collapsed to `_A`.  Code left byte-identical;
# docstrings/comments record the apparent intent.


def __A ( _lowercase , _lowercase ):
    """Intended `_check_parquet_dataset(dataset, expected_features)` helper."""
    assert isinstance(_lowercase , _lowercase )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a Dataset from parquet with/without keep_in_memory."""
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _A = ParquetDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
    _check_parquet_dataset(_lowercase , _lowercase )


@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a Dataset from parquet with explicit features."""
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    _A = features.copy() if features else default_expected_features
    _A = (
        Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _A = ParquetDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
    _check_parquet_dataset(_lowercase , _lowercase )


@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a Dataset from parquet with an explicit split name."""
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    _A = ParquetDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read()
    _check_parquet_dataset(_lowercase , _lowercase )
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('''path_type''' , [str, list] )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a Dataset from a single path or a list of paths."""
    if issubclass(_lowercase , _lowercase ):
        _A = parquet_path
    elif issubclass(_lowercase , _lowercase ):
        _A = [parquet_path]
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    _A = ParquetDatasetReader(_lowercase , cache_dir=_lowercase ).read()
    _check_parquet_dataset(_lowercase , _lowercase )


def __A ( _lowercase , _lowercase , _lowercase=("train",) ):
    """Intended `_check_parquet_datasetdict(dataset_dict, expected_features, splits)` helper."""
    assert isinstance(_lowercase , _lowercase )
    for split in splits:
        _A = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a DatasetDict from parquet with/without keep_in_memory."""
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _A = ParquetDatasetReader(
            {'''train''': parquet_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
    _check_parquet_datasetdict(_lowercase , _lowercase )


@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a DatasetDict from parquet with explicit features."""
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    _A = features.copy() if features else default_expected_features
    _A = (
        Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _A = ParquetDatasetReader({'''train''': parquet_path} , features=_lowercase , cache_dir=_lowercase ).read()
    _check_parquet_datasetdict(_lowercase , _lowercase )


@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __A ( _lowercase , _lowercase , _lowercase ):
    """Reading a DatasetDict from parquet with explicit split mapping."""
    if split:
        _A = {split: parquet_path}
    else:
        _A = '''train'''
        _A = {'''train''': parquet_path, '''test''': parquet_path}
    _A = tmp_path / '''cache'''
    _A = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    _A = ParquetDatasetReader(_lowercase , cache_dir=_lowercase ).read()
    _check_parquet_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )


def __A ( _lowercase , _lowercase ):
    """Round-trip: ParquetDatasetWriter output should equal the source table."""
    _A = ParquetDatasetWriter(_lowercase , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    _A = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    _A = pf.read()
    assert dataset.data.table == output_table


def __A ( _lowercase , _lowercase ):
    """Round-trip of an Image feature through parquet (eager and streaming)."""
    _A = str(shared_datadir / '''test_image_rgb.jpg''' )
    _A = {'''image''': [image_path]}
    _A = Features({'''image''': Image()} )
    _A = Dataset.from_dict(_lowercase , features=_lowercase )
    _A = ParquetDatasetWriter(_lowercase , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    _A = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features
    _A = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=_lowercase ).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    '''feature, expected''' ,
    [
        (Features({'''foo''': Value('''int32''' )} ), None),
        (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def __A ( _lowercase , _lowercase ):
    """Writer batch size is chosen per feature type (images/audio get smaller row groups)."""
    assert get_writer_batch_size(_lowercase ) == expected
62
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __A ( _lowercase = "" ): '''simple docstring''' _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' ) _A = soup.find_all('''td''' , attrs='''titleColumn''' ) _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase ) } def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ): '''simple docstring''' _A = get_imdb_top_aaa_movies() with open(_lowercase , '''w''' , newline='''''' ) as out_file: _A = csv.writer(_lowercase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __A = logging.get_logger(__name__) __A = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "marian" A_ = ["past_key_values"] A_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self: Union[str, Any] , __A: Any=5_81_01 , __A: Optional[Any]=None , __A: List[str]=10_24 , __A: Any=12 , __A: Tuple=40_96 , __A: Union[str, Any]=16 , __A: Tuple=12 , __A: Optional[int]=40_96 , __A: List[Any]=16 , __A: Optional[Any]=0.0 , __A: Optional[Any]=0.0 , __A: Optional[int]=True , __A: str=True , __A: Tuple="gelu" , __A: str=10_24 , __A: str=0.1 , __A: Dict=0.0 , __A: List[str]=0.0 , __A: List[Any]=0.02 , __A: int=5_81_00 , __A: Tuple=False , __A: Optional[int]=5_81_00 , __A: int=0 , __A: Optional[int]=0 , __A: Optional[int]=True , **__A: Dict , ) -> Dict: _A = vocab_size _A = decoder_vocab_size or vocab_size _A = max_position_embeddings _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , 
forced_eos_token_id=__A , **__A , ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def __A ( self: Any ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _A = {0: '''batch'''} _A = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _A = {0: '''batch''', 1: '''decoder_sequence'''} _A = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__A , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. _A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _A ,_A = self.num_layers for i in range(__A ): _A = {0: '''batch''', 2: '''past_sequence + sequence'''} _A = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _A = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def __A ( self: int ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _A = super().outputs else: _A = super(__A , self ).outputs if self.use_past: _A ,_A = self.num_layers for i in range(__A ): _A = {0: '''batch''', 2: '''past_sequence + sequence'''} _A = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __A ( self: Optional[Any] , __A: PreTrainedTokenizer , __A: int = -1 , 
__A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]: _A = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) # Generate decoder inputs _A = seq_length if not self.use_past else 1 _A = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) _A = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} _A = dict(**__A , **__A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _A ,_A = common_inputs['''input_ids'''].shape _A = common_inputs['''decoder_input_ids'''].shape[1] _A ,_A = self.num_attention_heads _A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _A = decoder_seq_length + 3 _A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _A = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__A , __A )] , dim=1 ) _A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _A ,_A = self.num_layers _A = min(__A , __A ) _A = max(__A , __A ) - min_num_layers _A = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__A ): common_inputs["past_key_values"].append( ( torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), ) ) # TODO: test this. 
_A = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__A , __A ): common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) ) return common_inputs def __A ( self: List[str] , __A: PreTrainedTokenizer , __A: int = -1 , __A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]: _A = self._generate_dummy_inputs_for_encoder_and_decoder( __A , __A , __A , __A , __A ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _A ,_A = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _A = seqlen + 2 _A ,_A = self.num_layers _A ,_A = self.num_attention_heads _A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _A = common_inputs['''attention_mask'''].dtype _A = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__A , __A , dtype=__A )] , dim=1 ) _A = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A ) ] return common_inputs def __A ( self: int , __A: PreTrainedTokenizer , __A: int = -1 , __A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _A = tokenizer.num_special_tokens_to_add(__A ) _A = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A ) # Generate dummy inputs according to compute batch and sequence _A = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _A = dict(tokenizer(__A , return_tensors=__A ) ) return common_inputs def __A ( self: Union[str, Any] , __A: PreTrainedTokenizer , __A: int = -1 , __A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) else: _A = self._generate_dummy_inputs_for_causal_lm( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) return common_inputs def __A ( self: Dict , __A: Any , __A: Any , __A: Tuple , __A: Dict ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A = super()._flatten_past_key_values_(__A , __A , __A , __A ) else: _A = super(__A , self )._flatten_past_key_values_( __A , __A , __A , __A ) @property def __A ( self: Any ) -> float: return 1e-4
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert 
tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
from __future__ import annotations

# Candidate moves as [row_delta, col_delta].
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A*-style search over a grid from ``init`` to ``goal``.

    Args:
        grid: 0 = free cell, 1 = obstacle.
        init: start cell ``[row, col]``.
        goal: target cell ``[row, col]``.
        cost: cost of a single step.
        heuristic: per-cell estimate of remaining distance to ``goal``.

    Returns:
        ``(path, action)`` where ``path`` lists the cells from ``init`` to
        ``goal`` and ``action[x][y]`` is the index into ``DIRECTIONS`` of the
        move that reached cell ``(x, y)``.

    Raises:
        ValueError: when no route to ``goal`` exists.
    """
    rows, cols = len(grid), len(grid[0])
    # closed[x][y] == 1 once the cell has been put on the frontier/expanded.
    closed = [[0 for _ in range(cols)] for _ in range(rows)]
    closed[init[0]][init[1]] = 1
    # action[x][y] records the move that produced the cell, for backtracking.
    action = [[0 for _ in range(cols)] for _ in range(rows)]

    x, y = init
    g = 0
    f = g + heuristic[x][y]  # estimated total cost through the start cell
    cell = [[f, g, x, y]]

    found = False  # set once the goal cell is expanded
    resign = False  # set when the frontier is exhausted (kept for parity)

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        # Expand the cheapest frontier cell next.
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x = next_cell[2]
        y = next_cell[3]
        g = next_cell[1]

        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(DIRECTIONS)):  # try each valid move
                xa = x + DIRECTIONS[i][0]
                ya = y + DIRECTIONS[i][1]
                if 0 <= xa < rows and 0 <= ya < cols:
                    if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                        ga = g + cost
                        fa = ga + heuristic[xa][ya]
                        cell.append([fa, ga, xa, ya])
                        closed[xa][ya] = 1
                        action[xa][ya] = i

    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x, y = goal
    invpath.append([x, y])
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = [invpath[len(invpath) - 1 - i] for i in range(len(invpath))]
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [row, col]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # Heuristic: Manhattan distance to the goal, with an extra penalty on
    # obstacle cells to push the path away from them.
    heuristic = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for row in action:
        print(row)
    for step in path:
        print(step)
62
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# NOTE(review): `__A` is bound twice at module level (logger, then URL map),
# so the logger becomes unreachable. This looks like mechanical renaming
# damage (presumably `logger` and `ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP`
# upstream) — confirm against the original RoBERTa configuration module.
__A = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL.
__A = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class SCREAMING_SNAKE_CASE ( snake_case ):
    """RoBERTa model configuration.

    NOTE(review): the signature below repeats the parameter name `__A`, which
    is a SyntaxError in Python, and the base-class expression `snake_case` is
    undefined here (presumably `PretrainedConfig`). The parameters appear to
    be, in order: vocab_size, hidden_size, num_hidden_layers,
    num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob,
    attention_probs_dropout_prob, max_position_embeddings, type_vocab_size,
    initializer_range, layer_norm_eps, pad/bos/eos token ids,
    position_embedding_type, use_cache, classifier_dropout — TODO confirm
    against the upstream module; this file has been mechanically mangled.
    """

    # Presumably the `model_type` registry key — TODO confirm.
    A_ = "roberta"

    def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict:
        super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
        # NOTE(review): every assignment below targets `_A`, so only the last
        # value survives, and the right-hand names (vocab_size, …) are never
        # defined in this scope. Presumably these were `self.<name> = <name>`
        # attribute assignments before the rename — verify upstream.
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = hidden_act
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = initializer_range
        _A = layer_norm_eps
        _A = position_embedding_type
        _A = use_cache
        _A = classifier_dropout


class SCREAMING_SNAKE_CASE ( snake_case ):
    """ONNX export configuration for RoBERTa.

    NOTE(review): this class reuses the name of the config class above and
    therefore shadows it — presumably it was `RobertaOnnxConfig` upstream.
    """

    @property
    def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input spec: input name -> {axis index: axis name}.

        The `multiple-choice` task carries an extra `choice` axis between
        batch and sequence.
        """
        if self.task == "multiple-choice":
            _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            _A = {0: '''batch''', 1: '''sequence'''}
        # NOTE(review): `dynamic_axis` is never defined — the two `_A =`
        # assignments above were presumably `dynamic_axis = …` originally.
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
62
1
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = None A_ = None A_ = None A_ = None class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: Any , __A: int=1 , __A: Any=0 , __A: Dict=2 , __A: Dict=5_12 , __A: Union[str, Any]="cls" , __A: str=False , __A: int=True , **__A: List[str] , ) -> Optional[Any]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = project_dim _A = pooler_fn _A = learn_encoder _A = use_attention_mask class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = [R"pooler", R"logit_scale"] A_ = [R"position_ids", R"predictions.decoder.bias"] A_ = "roberta" A_ = RobertaSeriesConfig def __init__( self: Dict , __A: Optional[Any] ) -> Any: super().__init__(__A ) _A = XLMRobertaModel(__A ) _A = nn.Linear(config.hidden_size , config.project_dim ) _A = getattr(__A , '''has_pre_transformation''' , __A ) if self.has_pre_transformation: _A = nn.Linear(config.hidden_size , config.project_dim ) _A = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __A ( self: Any , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , ) -> Tuple: _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.base_model( input_ids=__A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , encoder_hidden_states=__A , 
encoder_attention_mask=__A , output_attentions=__A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__A , ) if self.has_pre_transformation: _A = outputs['''hidden_states'''][-2] _A = self.pre_LN(__A ) _A = self.transformation_pre(__A ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: _A = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break 
quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export 
finished''' )
62
1
from collections import deque


def topological_sort(graph):
    """Topologically order ``graph`` using Kahn's algorithm.

    Args:
        graph: adjacency list mapping each vertex (0..n-1) to the vertices
            it points to; every vertex must appear as a key.

    Returns:
        The topological order as a list (also printed), or ``None`` after
        printing ``"Cycle exists"`` when the graph is cyclic.
    """
    # indegree[v] = number of edges pointing at v.
    indegree = [0] * len(graph)
    for neighbours in graph.values():
        for v in neighbours:
            indegree[v] += 1

    # Seed the queue with every vertex that has no incoming edges.
    # deque gives O(1) popleft (list.pop(0) is O(n)).
    queue = deque(v for v in range(len(graph)) if indegree[v] == 0)
    order = []

    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        # Removing `vertex` may bring its successors' indegree to zero.
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)

    if len(order) != len(graph):
        # Some vertex never reached indegree 0: the graph contains a cycle.
        print("Cycle exists")
        return None
    print(order)
    return order


if __name__ == "__main__":
    # Adjacency list of the demo graph.
    graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
    topological_sort(graph)
62
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: submodule name -> the public names it provides.
# NOTE(review): `__A` is rebound several times in this module (import
# structure, modeling-name list, lazy module). Presumably these were
# distinct targets upstream (`_import_structure`,
# `_import_structure["modeling_mega"]`, `sys.modules[__name__]`) before a
# mechanical rename — confirm against the original transformers module. As
# written, the final line references `_import_structure`, which is never
# defined here.
__A = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

# Only advertise the torch modeling classes when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: modeling names stay out of the import structure
else:
    __A = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

# Static type checkers see the real imports; at runtime the module is meant
# to be replaced by a _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): upstream this assigns to sys.modules[__name__]; here the
    # result is bound to `__A` and `sys` goes unused — verify before relying
    # on lazy loading actually taking effect.
    __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
1
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead (gravity) sort.

    Args:
        sequence: list of non-negative integers; mutated in place.

    Returns:
        The same list, sorted ascending.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    # One full sweep per element guarantees every "bead" has settled.
    for _ in range(len(sequence)):
        # Let beads fall between each adjacent pair of rods.
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Normalize plaintext for Playfair encryption.

    Uppercases and drops non-letters, inserts 'X' between doubled letters,
    and pads with a trailing 'X' to reach an even length.
    """
    dirty = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"  # Playfair cannot encode a doubled-letter digraph
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"  # pad to an even number of letters
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table (as a flat 25-element list) from ``key``.

    Key letters come first (duplicates dropped), then the rest of the
    alphabet. 'J' is omitted, as in the classic cipher.
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # A flat list keeps the row/column math simple: index = row * 5 + col.
    table = []
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # Same row: take the letter to the right (wrapping).
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # Same column: take the letter below (wrapping).
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` under ``key`` (inverse of ``encode``)."""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # Same row: take the letter to the left (wrapping).
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # Same column: take the letter above (wrapping).
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
62
1
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse the CLI options controlling dataset choice, sharding and output location."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a `datasets.map`-compatible closure tokenizing the ``text`` column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` protobuf strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    """Tokenize the dataset, group into fixed-length samples, and write TFRecord shards."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
62
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs and inputs for the shared model-testing mixins."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create a config plus the random tensors every check method consumes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
62
1
import numpy as np


class NearestNeighbour:
    """Resize a 3-channel image to (dst_width, dst_height) with nearest-neighbour sampling."""

    def __init__(self, img, dst_width: int, dst_height: int):
        # Reject non-positive sizes up front: a 0 dimension would otherwise
        # raise ZeroDivisionError below, which contradicts the message here.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Source pixels advanced per destination pixel along each axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # White canvas that `process` fills in.
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        """Fill ``self.output`` by sampling the nearest source pixel for each destination pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to its nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    # OpenCV is only needed for this interactive demo, so import it lazily.
    from cv2 import destroyAllWindows, imread, imshow, waitKey

    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
62
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the DFS post-order (finish-time order) of the tree rooted at ``vert``."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every unvisited vertex reachable from ``vert`` in the reversed graph (one SCC)."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm.

    First DFS records finish order on the original graph; a second DFS on the
    reversed graph, in decreasing finish order, carves out one strongly
    connected component per root. Vertices must be 0..len(graph)-1.
    """
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    # Process vertices in reverse finish order against the transpose graph.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
62
1
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


# (dataset, config) pairs that are mirrored on the HF GCP bucket.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build `absl.parameterized` named parameters, one per dataset (or per config pair)."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    # Filled in by the parameterized decorator for each named test case.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        """The dataset-info file for every mirrored config must be downloadable from GCP."""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)

            builder_cls = import_main_class(dataset_module.module_path, dataset=True)

            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )

            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(datset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcp(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcp(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
62
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    """Memory-function (memoized, top-down) 0/1 knapsack.

    Returns the best value achievable with the first ``i`` items and capacity
    ``j``. Results are cached in the global table ``f``, which must be
    pre-initialised with 0 in row/column 0 and -1 for unknown states.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # item i does not fit: inherit the best value without it
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            # best of skipping item i vs. taking it
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int) -> tuple:
    """Bottom-up 0/1 knapsack.

    Returns ``(best value for capacity w, full DP table)`` where
    ``dp[i][c]`` is the best value using the first ``i`` items at capacity ``c``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with `w` rather than the loop variable so that a zero capacity
    # (empty inner loop) does not raise a NameError.
    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list) -> tuple:
    """Solve the knapsack and reconstruct one optimal subset of items.

    Returns ``(optimal value, set of 1-based item indices achieving it)``.
    Raises ValueError for mismatched vectors and TypeError for non-integer weights.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the DP table backwards, adding item ``i`` whenever it was taken.

    An item was taken exactly when including it changed the optimum, i.e.
    ``dp[i][j] != dp[i-1][j]``.
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
62
1
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# pattern-name -> (regex locating the hard-coded version, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in one file using the named REPLACE_PATTERNS entry."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned by `check_min_version` in every maintained example."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        # Patch releases keep the examples pinned to the minor version.
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace "main" doc links with stable doc links in the README model list."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump the version for a release, asking the user to confirm the target."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """After a release, move the repo back to a dev version of the next minor."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
62
def solution(n: int = 1000000) -> int:
    """Project Euler 14: the starting number below ``n`` with the longest Collatz chain.

    Chain lengths are memoized in ``counters`` (keyed by starting number, with
    ``counters[1] == 1``) so shared suffixes of chains are only walked once.
    Ties keep the earliest starting number.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, n):
        counter = 0
        number = start

        while True:
            if number in counters:
                # Known tail: add its cached length and stop walking.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
62
1
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` flag passed by the test runner."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """Load the ``all_results.json`` metrics file written by a finished training run."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    """True when running on CUDA with apex installed (enables --fp16 runs)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    """End-to-end smoke tests for the `accelerate`-launched no_trainer example scripts."""

    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
62
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = word.split() def justify(_lowercase , _lowercase , _lowercase ) -> str: _A = max_width - width _A = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _A = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _A = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _A = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 _A = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) _A = [] _A = [] _A = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width _A ,_A = [word], len(_lowercase ) _A = max_width - width - len(_lowercase ) answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
62
1
from __future__ import annotations def __A ( _lowercase , _lowercase ): '''simple docstring''' if len(_lowercase ) <= 1 or n <= 1: return insert_next(_lowercase , n - 1 ) rec_insertion_sort(_lowercase , n - 1 ) def __A ( _lowercase , _lowercase ): '''simple docstring''' if index >= len(_lowercase ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order _A ,_A = ( collection[index], collection[index - 1], ) insert_next(_lowercase , index + 1 ) if __name__ == "__main__": __A = input('Enter integers separated by spaces: ') __A = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) 
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) 
with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
62
1
def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = len(_lowercase ) print('''The following activities are selected:''' ) # The first activity is always selected _A = 0 print(_lowercase , end=''',''' ) # Consider rest of the activities for j in range(_lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(_lowercase , end=''',''' ) _A = j if __name__ == "__main__": import doctest doctest.testmod() __A = [1, 3, 0, 5, 8, 5] __A = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
62
import math def __A ( _lowercase ): '''simple docstring''' _A = [] _A = 2 _A = int(math.sqrt(_lowercase ) ) # Size of every segment _A = [True] * (end + 1) _A = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): _A = False start += 1 prime += in_prime _A = end + 1 _A = min(2 * end , _lowercase ) while low <= n: _A = [True] * (high - low + 1) for each in in_prime: _A = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): _A = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) _A = high + 1 _A = min(high + end , _lowercase ) return prime print(sieve(10**6))
62
1
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow __A = False class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: str , __A: Optional[Any]=32 ) -> int: set_seed(0 ) _A = UNetaDModel(sample_size=__A , in_channels=3 , out_channels=3 ) _A = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def __A ( self: Union[str, Any] ) -> Tuple: _A = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _A = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__A , ) _A = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__A , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) _A = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__A ) for _ in range(4 )] _A = [torch.randn((4, 3, 32, 32) ).to(__A ) for _ in range(4 )] _A = [torch.randint(0 , 10_00 , (4,) ).long().to(__A ) for _ in range(4 )] # train with a DDPM scheduler _A ,_A = self.get_model_optimizer(resolution=32 ) model.train().to(__A ) for i in range(4 ): optimizer.zero_grad() _A = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _A = model(__A , timesteps[i] ).sample _A = torch.nn.functional.mse_loss(__A , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _A ,_A = self.get_model_optimizer(resolution=32 ) model.train().to(__A ) for i in range(4 ): optimizer.zero_grad() _A = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _A = model(__A , timesteps[i] ).sample _A = torch.nn.functional.mse_loss(__A , noise[i] ) loss.backward() optimizer.step() del 
model, optimizer self.assertTrue(torch.allclose(__A , __A , atol=1e-5 ) ) self.assertTrue(torch.allclose(__A , __A , atol=1e-5 ) )
62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = 
nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
62
1
import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. 
Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' 
) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
62
def __A ( _lowercase ): '''simple docstring''' _A = [0] * len(_lowercase ) _A = [] _A = [] _A = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_lowercase ) ): if indegree[i] == 0: queue.append(_lowercase ) while queue: _A = queue.pop(0 ) cnt += 1 topo.append(_lowercase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_lowercase ) if cnt != len(_lowercase ): print('''Cycle exists''' ) else: print(_lowercase ) # Adjacency List of Graph __A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
62
1
import requests from bsa import BeautifulSoup def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = BeautifulSoup(requests.get(_lowercase , params=_lowercase ).content , '''html.parser''' ) _A = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) _A = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": __A = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 2018, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
62
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class SCREAMING_SNAKE_CASE ( snake_case , snake_case ): """simple docstring""" A_ = 1 @register_to_config def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]: # set `betas`, `alphas`, `timesteps` self.set_timesteps(__A ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int: _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(__A ) _A = [] def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__A ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 
* self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(__A , __A , __A , __A ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A ) def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor: return sample def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]: _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(__A , 1e-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[str] ) -> Dict: return self.config.num_train_timesteps
62
1
__A = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' __A = [{'type': 'code', 'content': INSTALL_CONTENT}] __A = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
62
1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "gptj" A_ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self: Any , __A: List[Any]=5_04_00 , __A: Tuple=20_48 , __A: Any=40_96 , __A: List[str]=28 , __A: Optional[int]=16 , __A: Any=64 , __A: Any=None , __A: List[Any]="gelu_new" , __A: Any=0.0 , __A: int=0.0 , __A: Tuple=0.0 , __A: List[str]=1e-5 , __A: int=0.02 , __A: Any=True , __A: str=5_02_56 , __A: Dict=5_02_56 , __A: List[str]=False , **__A: Tuple , ) -> Union[str, Any]: _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = n_inner _A = rotary_dim _A = activation_function _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = use_cache _A = bos_token_id _A = eos_token_id super().__init__( bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: List[str] , __A: PretrainedConfig , __A: str = "default" , __A: List[PatchingSpec] = None , __A: bool = False , ) -> List[str]: super().__init__(__A , task=__A , patching_specs=__A , use_past=__A ) if not getattr(self._config , '''pad_token_id''' , __A ): # TODO: how to do that better? 
_A = 0 @property def __A ( self: Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: _A = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='''inputs''' ) _A = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __A ( self: Any ) -> int: return self._config.n_layer @property def __A ( self: Any ) -> int: return self._config.n_head def __A ( self: List[Any] , __A: PreTrainedTokenizer , __A: int = -1 , __A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]: _A = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() _A = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _A ,_A = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _A = seqlen + 2 _A = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _A = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] _A = common_inputs['''attention_mask'''] if self.use_past: _A = ordered_inputs['''attention_mask'''].dtype _A = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def __A ( self: int ) -> int: return 13
62
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: 
ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to 
instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
1
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __A ( _lowercase = "" ): '''simple docstring''' _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' ) _A = soup.find_all('''td''' , attrs='''titleColumn''' ) _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase ) } def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ): '''simple docstring''' _A = get_imdb_top_aaa_movies() with open(_lowercase , '''w''' , newline='''''' ) as out_file: _A = csv.writer(_lowercase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , 
decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there 
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) 
def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( 
self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, 
-1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
62
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the original bound both the logger and the archive map to the
# same name, leaving `logger.info` below a NameError; restored distinct names.
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a Transformer-XL model (defaults match transfo-xl-wt103).

    The original obfuscated version had duplicate ``__A`` parameter names
    (a SyntaxError), discarded every constructor argument into a local ``_A``,
    and decorated a setter with ``@max_position_embeddings.setter`` while no
    property of that name existed (NameError at class creation). All restored.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # only read, never mutated; copied into self.cutoffs below
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # First projection is never tied; the rest follow proj_share_all_but_first.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
62
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE:
    """Wraps the RAG question-encoder tokenizer and generator tokenizer.

    Calls are dispatched to ``current_tokenizer`` (the question encoder by
    default); decoding always uses the generator tokenizer. The original
    obfuscated version discarded the tokenizers into a local ``_A`` so no
    instance attribute was ever set — restored here.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Default dispatch target for __call__.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both tokenizers under dedicated sub-folders of ``save_directory``."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from the matching sub-folders."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated helper that tokenizes sources and (optionally) targets."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
62
1
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Processor wrapping an image processor and a tokenizer.

    Encodes images and/or text through ``__call__`` and parses generated token
    sequences back into JSON-like structures with :meth:`tokenajson`.

    NOTE(review): the original class inherited the undefined name ``snake_case``
    even though the file imports ``ProcessorMixin`` -- fixed to use the imported
    base. The original also named every method ``__A``, so only the last one
    survived; methods have been given distinct names matching their behavior.
    """

    A_ = ["image_processor", "tokenizer"]
    A_ = "AutoImageProcessor"
    A_ = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` kwarg for backward compatibility.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Encode images and/or text; returns image features, encodings, or both merged."""
        # For backward compatibility: inside `as_target_processor` everything
        # goes straight to the active sub-processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Merge the tokenized labels into the image-processor output.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated context manager that routes ``__call__`` to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string like ``<s_key>value</s_key>`` into a dict/list.

        Returns a dict at the top level (or ``{"text_sequence": tokens}`` if no
        structure is found), and a list of dicts for inner values.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated field: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
62
from __future__ import annotations

__all__ = ["__A", "_ceil_index"]


def _ceil_index(v: list[int], left: int, right: int, key: int) -> int:  # noqa: E741
    """Binary search: return the smallest index in ``(left, right]`` with ``v[idx] >= key``.

    ``v[left+1:right+1]`` must be sorted in non-decreasing order. The original
    version declared all four parameters with the same name (a SyntaxError) --
    they now have distinct names.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def __A(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of *v*.

    Runs in O(n log n) using the patience-sorting "tails" technique:
    ``tail[i]`` holds the smallest possible tail of an increasing subsequence
    of length ``i + 1``. Returns 0 for an empty input.
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: restart the shortest subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail >= v[i] to keep tails minimal.
            # (The original dropped this write during scrambling.)
            tail[_ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __A ( _lowercase , _lowercase=10 ): '''simple docstring''' _A = [] for _ in range(_lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __A ( _lowercase , _lowercase=10 ): '''simple docstring''' _A = [] for step in range(_lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(_lowercase , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _lowercase ) _A = torch.load(_lowercase ) scheduler.load_state_dict(_lowercase ) return lrs @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: str , __A: Optional[int] , __A: Optional[int] , __A: Optional[Any] ) -> Optional[int]: self.assertEqual(len(__A ) , len(__A ) ) for a, b in zip(__A , __A ): self.assertAlmostEqual(__A , __A , delta=__A ) def __A ( self: Union[str, Any] ) -> Union[str, Any]: _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__A ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): _A = criterion(__A , __A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __A ( self: Optional[Any] ) -> List[Any]: _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__A ) _A = torch.tensor([0.4, 0.2, -0.5] ) _A = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _A = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__A , weight_decay=0.0 , relative_step=__A , scale_parameter=__A , warmup_init=__A , ) for _ in range(10_00 ): _A = criterion(__A , __A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" A_ = nn.Linear(50 , 50 ) if is_torch_available() else None A_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None A_ = 10 def __A ( self: Optional[Any] , __A: List[str] , __A: Optional[Any] , __A: Optional[int] , __A: List[Any]=None ) -> List[Any]: self.assertEqual(len(__A ) , len(__A ) ) for a, b in zip(__A , __A ): self.assertAlmostEqual(__A , __A , delta=__A , msg=__A ) def __A ( self: Tuple ) -> List[Any]: _A = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _A = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), 
get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _A ,_A = data _A = scheduler_func(self.optimizer , **__A ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _A = unwrap_schedule(__A , self.num_steps ) self.assertListAlmostEqual( __A , __A , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) _A = scheduler_func(self.optimizer , **__A ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__A ) # wrap to test picklability of the schedule _A = unwrap_and_save_reload_schedule(__A , self.num_steps ) self.assertListEqual(__A , __A , msg=f"""failed for {scheduler_func} in save and reload""" ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Tuple ) -> Optional[Any]: _A = fn def __call__( self: int , *__A: Tuple , **__A: Optional[Any] ) -> Optional[int]: return self.fn(*__A , **__A ) @classmethod def __A ( self: Tuple , __A: Dict ) -> List[str]: _A = list(map(self , scheduler.lr_lambdas ) )
62
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else 
processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in 
outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
1
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "summarization" A_ = ["loss"] A_ = ROUGE_KEYS A_ = "rouge2" def __init__( self: List[str] , __A: Any , **__A: Tuple ) -> Optional[Any]: if hparams.sortish_sampler and hparams.gpus > 1: _A = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(__A , num_labels=__A , mode=self.mode , **__A ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) _A = Path(self.output_dir ) / '''metrics.json''' _A = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) _A = 0 _A = defaultdict(__A ) _A = self.config.model_type _A = self.config.tgt_vocab_size if 
self.model_type == '''fsmt''' else self.config.vocab_size _A = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } _A = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } _A = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} _A = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) _A = get_git_info()['''repo_sha'''] _A = hparams.num_workers _A = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __A ): _A = self.tokenizer.lang_code_to_id[hparams.tgt_lang] _A = self.decoder_start_token_id _A = ( SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) _A = False _A = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: _A = self.hparams.eval_max_gen_length else: _A = self.model.config.max_length _A = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def __A ( self: List[Any] , __A: Dict[str, torch.Tensor] ) -> Dict[str, List[str]]: _A = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(__A , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) _A = True return 
readable_batch def __A ( self: Optional[Any] , __A: Any , **__A: List[str] ) -> Any: return self.model(__A , **__A ) def __A ( self: Tuple , __A: List[int] ) -> Union[str, Any]: _A = self.tokenizer.batch_decode( __A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) return lmap(str.strip , __A ) def __A ( self: List[str] , __A: dict ) -> Tuple: _A = self.tokenizer.pad_token_id _A ,_A = batch['''input_ids'''], batch['''attention_mask'''] _A = batch['''labels'''] if isinstance(self.model , __A ): _A = self.model._shift_right(__A ) else: _A = shift_tokens_right(__A , __A ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero _A = decoder_input_ids self.save_readable_batch(__A ) _A = self(__A , attention_mask=__A , decoder_input_ids=__A , use_cache=__A ) _A = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id _A = nn.CrossEntropyLoss(ignore_index=__A ) assert lm_logits.shape[-1] == self.vocab_size _A = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: _A = nn.functional.log_softmax(__A , dim=-1 ) _A ,_A = label_smoothed_nll_loss( __A , __A , self.hparams.label_smoothing , ignore_index=__A ) return (loss,) @property def __A ( self: List[Any] ) -> int: return self.tokenizer.pad_token_id def __A ( self: List[Any] , __A: Union[str, Any] , __A: List[str] ) -> Dict: _A = self._step(__A ) _A = dict(zip(self.loss_names , __A ) ) # tokens per batch _A = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() _A = batch['''input_ids'''].shape[0] _A = batch['''input_ids'''].eq(self.pad ).sum() _A = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def __A ( self: Any , __A: List[Any] , __A: Dict ) -> Dict: return self._generative_step(__A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , 
__A: Tuple="val" ) -> Dict: self.step_count += 1 _A = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} _A = losses['''loss'''] _A = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } _A = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) _A = torch.tensor(__A ).type_as(__A ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(__A ) _A = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()} _A = self.step_count self.metrics[prefix].append(__A ) # callback writes this to self.metrics_save_path _A = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"""{prefix}_loss""": loss, f"""{prefix}_{self.val_metric}""": metric_tensor, } def __A ( self: List[Any] , __A: Tuple , __A: Optional[int] ) -> Dict: return calculate_rouge(__A , __A ) def __A ( self: Optional[int] , __A: dict ) -> dict: _A = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') _A = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) _A = (time.time() - ta) / batch['''input_ids'''].shape[0] _A = self.ids_to_clean_text(__A ) _A = self.ids_to_clean_text(batch['''labels'''] ) _A = self._step(__A ) _A = dict(zip(self.loss_names , __A ) ) _A = self.calc_generative_metrics(__A , __A ) _A = np.mean(lmap(__A , __A ) ) base_metrics.update(gen_time=__A , gen_len=__A , preds=__A , target=__A , **__A ) return base_metrics def __A ( self: Dict , __A: int , __A: str ) -> List[Any]: return self._generative_step(__A ) def __A ( self: int , __A: int ) -> List[str]: return self.validation_epoch_end(__A , prefix='''test''' ) def __A ( self: Optional[Any] , __A: int ) 
-> SeqaSeqDataset: _A = self.n_obs[type_path] _A = self.target_lens[type_path] _A = self.dataset_class( self.tokenizer , type_path=__A , n_obs=__A , max_target_length=__A , **self.dataset_kwargs , ) return dataset def __A ( self: Optional[int] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = self.get_dataset(__A ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": _A = dataset.make_sortish_sampler(__A , distributed=self.hparams.gpus > 1 ) return DataLoader( __A , batch_size=__A , collate_fn=dataset.collate_fn , shuffle=__A , num_workers=self.num_workers , sampler=__A , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": _A = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( __A , batch_sampler=__A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( __A , batch_size=__A , collate_fn=dataset.collate_fn , shuffle=__A , num_workers=self.num_workers , sampler=__A , ) def __A ( self: Tuple ) -> DataLoader: _A = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__A ) return dataloader def __A ( self: Optional[Any] ) -> DataLoader: return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def __A ( self: str ) -> DataLoader: return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def __A ( __A: Dict , __A: Tuple ) -> List[str]: BaseTransformer.add_model_specific_args(__A , __A ) add_generic_args(__A , __A ) parser.add_argument( '''--max_source_length''' , default=10_24 , type=__A , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=56 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=1_42 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=1_42 , type=__A , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__A ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__A ) parser.add_argument('''--max_tokens_per_batch''' , type=__A , default=__A ) parser.add_argument('''--logger_name''' , type=__A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=__A , default=-1 , required=__A , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=__A , default=5_00 , required=__A , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' , type=__A , default=-1 , required=__A , help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' , type=__A , default='''summarization''' , required=__A , help='''# examples. 
-1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=__A , default=0.0 , required=__A ) parser.add_argument('''--src_lang''' , type=__A , default='''''' , required=__A ) parser.add_argument('''--tgt_lang''' , type=__A , default='''''' , required=__A ) parser.add_argument('''--eval_beams''' , type=__A , default=__A , required=__A ) parser.add_argument( '''--val_metric''' , type=__A , default=__A , required=__A , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=__A , default=__A , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=__A , default=1 , required=__A , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=__A , default=-1 , required=__A , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "translation" A_ = ["loss"] A_ = ["bleu"] A_ = "bleu" def __init__( self: List[Any] , __A: Tuple , **__A: Optional[int] ) -> int: super().__init__(__A , **__A ) _A = hparams.src_lang _A = hparams.tgt_lang def __A ( self: List[str] , __A: int , __A: str ) -> dict: return calculate_bleu(__A , __A ) def __A ( _lowercase , _lowercase=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=_lowercase ) check_output_dir(_lowercase , expected_items=3 ) if model is None: if "summarization" in args.task: _A = SummarizationModule(_lowercase ) else: _A = TranslationModule(_lowercase ) _A = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): _A = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger _A = 
os.environ.get('''WANDB_PROJECT''' , _lowercase ) _A = WandbLogger(name=model.output_dir.name , project=_lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger _A = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: _A = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: _A = False _A = args.val_metric == '''loss''' _A = generic_train( _lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model _A = '''''' _A = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_lowercase ) ) if checkpoints: _A = checkpoints[-1] _A = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": __A = argparse.ArgumentParser() __A = pl.Trainer.add_argparse_args(parser) __A = SummarizationModule.add_model_specific_args(parser, os.getcwd()) __A = parser.parse_args() main(args)
62
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __A ( _lowercase = "" ): '''simple docstring''' _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' ) _A = soup.find_all('''td''' , attrs='''titleColumn''' ) _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase ) } def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ): '''simple docstring''' _A = get_imdb_top_aaa_movies() with open(_lowercase , '''w''' , newline='''''' ) as out_file: _A = csv.writer(_lowercase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
1
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: Dict , __A: Dict , __A: List[str]=None , __A: Optional[Any]=True , __A: str=None , **__A: List[str] ) -> Any: _A = parent _A = config_class _A = has_text_modality _A = kwargs _A = common_properties def __A ( self: int ) -> Optional[int]: _A = self.config_class(**self.inputs_dict ) _A = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['''vocab_size'''] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__A , __A ) , msg=f"""`{prop}` does not exist""" ) # Test that config has the common properties as setter for idx, name in enumerate(__A ): try: setattr(__A , __A , __A ) self.parent.assertEqual( getattr(__A , __A ) , __A , msg=f"""`{name} value {idx} expected, but was {getattr(__A , __A )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(__A ): try: _A = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__A , __A ) , __A , msg=f"""`{name} value {idx} expected, but was {getattr(__A , __A )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def __A ( self: Tuple ) -> List[str]: _A = self.config_class(**self.inputs_dict ) _A = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __A ) def __A ( self: Union[str, Any] ) -> Optional[int]: _A = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(__A , '''config.json''' ) config_first.to_json_file(__A ) _A = self.config_class.from_json_file(__A ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __A ( self: str ) -> Optional[Any]: _A = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__A ) _A = self.config_class.from_pretrained(__A ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __A ( self: Optional[Any] ) -> Dict: _A = self.config_class(**self.inputs_dict ) _A = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(__A , __A ) config_first.save_pretrained(__A ) _A = self.config_class.from_pretrained(__A , subfolder=__A ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __A ( self: int ) -> Tuple: _A = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _A = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def __A ( self: List[str] ) -> List[Any]: if self.config_class.is_composition: return _A = self.config_class() self.parent.assertIsNotNone(__A 
) def __A ( self: Dict ) -> Dict: _A = copy.deepcopy(__A ) _A = self.config_class(**__A ) _A = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) ) elif getattr(__A , __A ) != value: wrong_values.append((key, getattr(__A , __A ), value) ) if len(__A ) > 0: _A = '''\n'''.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] ) raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" ) def __A ( self: List[Any] ) -> str: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert 
tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
def __A ( _lowercase ): '''simple docstring''' _A = set() # edges = list of graph's edges _A = get_edges(_lowercase ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: _A ,_A = edges.pop() chosen_vertices.add(_lowercase ) chosen_vertices.add(_lowercase ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(_lowercase ) return chosen_vertices def __A ( _lowercase ): '''simple docstring''' _A = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
62
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "roberta" def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} 
return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
1
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __A = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "sequence-classification" def __init__( self: str , __A: Union[str, Any] ) -> List[str]: if type(__A ) == dict: _A = Namespace(**__A ) _A = glue_output_modes[hparams.task] _A = glue_tasks_num_labels[hparams.task] super().__init__(__A , __A , self.mode ) def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]: return self.model(**__A ) def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A = outputs[0] _A = self.trainer.lr_schedulers[0]['''scheduler'''] _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self: List[str] ) -> Dict: _A = self.hparams _A = processors[args.task]() _A = processor.get_labels() for mode in ["train", "dev"]: _A = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __A ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _A = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else 
processor.get_train_examples(args.data_dir ) ) _A = convert_examples_to_features( __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , __A ) torch.save(__A , __A ) def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader: _A = '''dev''' if mode == '''test''' else mode _A = self._feature_file(__A ) logger.info('''Loading features from cached file %s''' , __A ) _A = torch.load(__A ) _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _A = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _A = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , ) def __A ( self: List[str] , __A: str , __A: Tuple ) -> str: _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None _A = self(**__A ) _A ,_A = outputs[:2] _A = logits.detach().cpu().numpy() _A = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __A ( self: str , __A: Dict ) -> tuple: _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _A = np.argmax(__A , axis=1 ) elif self.hparams.glue_output_mode == "regression": _A = np.squeeze(__A ) _A = np.concatenate([x['''target'''] for x in 
outputs] , axis=0 ) _A = [[] for _ in range(out_label_ids.shape[0] )] _A = [[] for _ in range(out_label_ids.shape[0] )] _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )} _A = dict(results.items() ) _A = results return ret, preds_list, out_label_list def __A ( self: Any , __A: list ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self: int , __A: Union[str, Any] ) -> dict: _A ,_A ,_A = self._eval_end(__A ) _A = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]: BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=__A , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser() add_generic_args(_lowercase , os.getcwd() ) _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() ) _A = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A = os.path.join( '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) _A = GLUETransformer(_lowercase ) _A = generic_train(_lowercase , _lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) ) _A = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_lowercase ) if __name__ == "__main__": main()
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break 
quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export 
finished''' )
62
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "open-llama" def __init__( self: int , __A: Optional[Any]=10_00_00 , __A: Union[str, Any]=40_96 , __A: List[str]=1_10_08 , __A: Union[str, Any]=32 , __A: Any=32 , __A: str="silu" , __A: str=20_48 , __A: Any=0.02 , __A: Optional[Any]=1e-6 , __A: str=True , __A: Optional[Any]=0 , __A: List[Any]=1 , __A: int=2 , __A: str=False , __A: List[str]=True , __A: List[str]=0.1 , __A: str=0.1 , __A: str=True , __A: List[str]=True , __A: List[str]=None , **__A: List[str] , ) -> str: _A = vocab_size _A = max_position_embeddings _A = hidden_size _A = intermediate_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = initializer_range _A = rms_norm_eps _A = use_cache _A = kwargs.pop( '''use_memorry_efficient_attention''' , __A ) _A = hidden_dropout_prob _A = attention_dropout_prob _A = use_stable_embedding _A = shared_input_output_embedding _A = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , ) def __A ( self: List[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f"""got {self.rope_scaling}""" ) _A = self.rope_scaling.get('''type''' , __A ) _A = self.rope_scaling.get('''factor''' , __A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0: raise 
ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
62
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @slow @require_torch def __A ( self: Union[str, Any] ) -> Tuple: _A = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) _A = BertTokenizer.from_pretrained('''bert-base-uncased''' ) _A = bertabert.config.encoder.vocab_size _A = tokenizer.sep_token_id _A = tokenizer.cls_token_id _A = 1_28 _A = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) _A = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) _A = train_dataset.select(range(32 ) ) _A = val_dataset.select(range(16 ) ) _A = 4 def _map_to_encoder_decoder_inputs(__A: int ): # Tokenizer will automatically set [BOS] <text> [EOS] _A = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__A , max_length=5_12 ) _A = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__A , max_length=1_28 ) _A = inputs.input_ids _A = inputs.attention_mask _A = outputs.input_ids _A = outputs.input_ids.copy() _A = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] _A = outputs.attention_mask assert all(len(__A ) == 5_12 for x in inputs.input_ids ) assert all(len(__A ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(__A: int ): _A = pred.label_ids _A = pred.predictions # all unnecessary tokens are removed _A = tokenizer.batch_decode(__A , skip_special_tokens=__A ) _A = tokenizer.batch_decode(__A , skip_special_tokens=__A ) _A = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A ) return {"accuracy": accuracy} # map train dataset _A = 
train_dataset.map( _map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset _A = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) _A = self.get_auto_remove_tmp_dir() _A = SeqaSeqTrainingArguments( output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy='''steps''' , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _A = SeqaSeqTrainer( model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , ) # start training trainer.train()
62
import itertools import string from collections.abc import Generator, Iterable def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = iter(_lowercase ) while True: _A = tuple(itertools.islice(_lowercase , _lowercase ) ) if not chunk: return yield chunk def __A ( _lowercase ): '''simple docstring''' _A = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] ) _A = '''''' if len(_lowercase ) < 2: return dirty for i in range(len(_lowercase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_lowercase ) & 1: clean += "X" return clean def __A ( _lowercase ): '''simple docstring''' _A = '''ABCDEFGHIKLMNOPQRSTUVWXYZ''' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler _A = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_lowercase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_lowercase ) return table def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = generate_table(_lowercase ) _A = prepare_input(_lowercase ) _A = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowercase , 2 ): _A ,_A = divmod(table.index(_lowercase ) , 5 ) _A ,_A = divmod(table.index(_lowercase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = generate_table(_lowercase ) _A = '''''' # 
https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowercase , 2 ): _A ,_A = divmod(table.index(_lowercase ) , 5 ) _A ,_A = divmod(table.index(_lowercase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
62
1
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    """
    `Trainer` subclass for question answering with TensorRT/pytorch-quantization
    support: adds calibration, QA-style metric post-processing and ONNX export.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a shuffled `DataLoader` over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,  # presumably random calibration samples are wanted — TODO confirm
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes over ~`calib_num` samples to collect quantization ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step (outputs are discarded; only calibration statistics matter)
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with metrics computed on post-processed (QA-formatted) predictions."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Restore even if the loop raises, so later calls still compute metrics.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on a test set; returns a `PredictionOutput` with post-processed metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to `output_dir`/model.onnx using a real eval batch."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        # Fake-quant ops must be exported as ONNX-compatible fb fake-quant nodes.
        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
62
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs/inputs and shape-checks model outputs for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create a config plus random input tensors for every supported input kind."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        # exercise the optional-argument code paths as well as the bare call
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        # greedy decoding must reproduce the pinned reference continuation exactly
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
62
1
def __A ( _lowercase ): '''simple docstring''' if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence _A = gray_code_sequence_string(_lowercase ) # # convert them to integers for i in range(len(_lowercase ) ): _A = int(sequence[i] , 2 ) return sequence def __A ( _lowercase ): '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] _A = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits _A = gray_code_sequence_string(bit_count - 1 ) _A = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): _A = '''0''' + smaller_sequence[i] sequence.append(_lowercase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): _A = '''1''' + smaller_sequence[i] sequence.append(_lowercase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
62
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_lowercase , _lowercase , _lowercase ) order.append(_lowercase ) return order def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = True _A = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_lowercase , _lowercase , _lowercase ) return component def __A ( _lowercase ): '''simple docstring''' _A = len(_lowercase ) * [False] _A = {vert: [] for vert in range(len(_lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_lowercase ) _A = [] for i, was_visited in enumerate(_lowercase ): if not was_visited: order += topology_sort(_lowercase , _lowercase , _lowercase ) _A = [] _A = len(_lowercase ) * [False] for i in range(len(_lowercase ) ): _A = order[len(_lowercase ) - i - 1] if not visited[vert]: _A = find_components(_lowercase , _lowercase , _lowercase ) components_list.append(_lowercase ) return components_list
62
1
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """
    Memoized (top-down) 0/1 knapsack: best value using the first ``i`` items
    with remaining capacity ``j``.  Results are cached in the global table ``f``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # negative entries mark "not computed yet"
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """
    Bottom-up 0/1 knapsack for capacity ``w`` over ``n`` items.
    Returns ``(best_value, dp_table)``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with `w` directly: the previous `dp[n][w_]` relied on the leaked
    # loop variable and raised NameError when w == 0.
    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solve the 0/1 knapsack and also reconstruct one optimal subset of item
    indices (1-based).

    Raises:
        ValueError: if ``wt``/``val`` are not sequences or differ in length.
        TypeError: if any weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the dp table backwards, adding item index ``i`` whenever it was taken."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i was not taken
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # item i was taken: record it and drop its weight from the capacity
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
62
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """
    Memoized (top-down) 0/1 knapsack: best value using the first ``i`` items
    with remaining capacity ``j``.  Results are cached in the global table ``f``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # negative entries mark "not computed yet"
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """
    Bottom-up 0/1 knapsack for capacity ``w`` over ``n`` items.
    Returns ``(best_value, dp_table)``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with `w` directly: the previous `dp[n][w_]` relied on the leaked
    # loop variable and raised NameError when w == 0.
    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solve the 0/1 knapsack and also reconstruct one optimal subset of item
    indices (1-based).

    Raises:
        ValueError: if ``wt``/``val`` are not sequences or differ in length.
        TypeError: if any weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the dp table backwards, adding item index ``i`` whenever it was taken."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i was not taken
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # item i was taken: record it and drop its weight from the capacity
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
62
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = "ibert" def __init__( self: Any , __A: Tuple=3_05_22 , __A: List[Any]=7_68 , __A: Tuple=12 , __A: Dict=12 , __A: List[str]=30_72 , __A: str="gelu" , __A: str=0.1 , __A: Tuple=0.1 , __A: Optional[Any]=5_12 , __A: List[str]=2 , __A: Any=0.02 , __A: int=1e-12 , __A: List[Any]=1 , __A: List[Any]=0 , __A: Any=2 , __A: Optional[Any]="absolute" , __A: Tuple=False , __A: List[Any]="none" , **__A: Any , ) -> List[str]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = quant_mode _A = force_dequant class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @property def __A ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
62
def __A ( _lowercase = 1_00_00_00 ): '''simple docstring''' _A = 1 _A = 1 _A = {1: 1} for inputa in range(2 , _lowercase ): _A = 0 _A = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: _A = (3 * number) + 1 counter += 1 if inputa not in counters: _A = counter if counter > pre_counter: _A = inputa _A = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
62
1
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'

_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'

_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """Google BLEU (GLEU) metric: a thin wrapper around NLTK's `gleu_score.corpus_gleu`."""

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        # `datasets` passes predictions/references by keyword, so parameter
        # names (not order) define the interface; the first mangled parameter
        # fed `list_of_references`, hence it is named `references` here.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
62
def __A(sentence: str, max_width: int) -> list[str]:
    """Fully justify *sentence* into lines of exactly ``max_width`` characters.

    Words are packed greedily onto each line.  Extra spaces in a line are
    distributed as evenly as possible between words, with the leftmost gaps
    receiving any remainder.  The last line is left-justified and padded with
    trailing spaces.

    NOTE(review): the original had two parameters both named ``_lowercase``
    (a SyntaxError) and referenced the undefined names ``word``/``max_width``;
    this restores the intended behavior.  Words longer than ``max_width`` are
    not handled specially (same as the original logic).

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = sentence.split()

    def justify(line: list[str], width: int, max_width: int) -> str:
        # total padding that must be inserted to reach exactly max_width
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # only one word on the line: pad the remainder with spaces
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # num_spaces_between_words_list[i]: spaces to insert after line[i]
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        # distribute the leftover spaces round-robin to the leftmost gaps
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        # the last word carries no trailing spaces
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        # width   = sum of lengths of words already on the line
        # len(line) = minimum number of single spaces needed between them
        if width + len(word) + len(line) <= max_width:
            line.append(word)
            width += len(word)
        else:
            # line is full: justify it and start a new one with this word
            answer.append(justify(line, width, max_width))
            line, width = [word], len(word)
    # last line: left-justified, padded with trailing spaces
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


# Descriptive public alias (star-import friendly); `__A` is kept for
# backward compatibility.
text_justification = __A

if __name__ == "__main__":
    from doctest import testmod

    testmod()
62
1
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = f"""{sampling_rate}""" _A = '''1''' _A = '''f32le''' _A = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(_lowercase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _A = ffmpeg_process.communicate(_lowercase ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error _A = output_stream[0] _A = np.frombuffer(_lowercase , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def __A ( _lowercase , _lowercase , _lowercase = "f32le" , ): '''simple docstring''' _A = f"""{sampling_rate}""" _A = '''1''' if format_for_conversion == "s16le": _A = 2 elif format_for_conversion == "f32le": _A = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) _A = platform.system() if system == "Linux": _A = '''alsa''' _A = '''default''' elif system == "Darwin": _A = '''avfoundation''' _A = ''':0''' elif system == "Windows": _A = '''dshow''' _A = '''default''' _A = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] _A = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _A = _ffmpeg_stream(_lowercase , _lowercase ) for item in iterator: yield item def __A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: _A = stream_chunk_s else: _A = chunk_length_s _A = ffmpeg_microphone(_lowercase , _lowercase , format_for_conversion=_lowercase ) if format_for_conversion == "s16le": _A = np.intaa _A = 2 elif format_for_conversion == "f32le": _A = np.floataa _A = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) if stride_length_s is None: _A = chunk_length_s / 6 _A = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(_lowercase , (int, float) ): _A = [stride_length_s, stride_length_s] _A = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _A = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _A = datetime.datetime.now() _A = datetime.timedelta(seconds=_lowercase ) for item in chunk_bytes_iter(_lowercase , _lowercase , stride=(stride_left, stride_right) , stream=_lowercase ): # Put everything back in numpy scale _A = np.frombuffer(item['''raw'''] , dtype=_lowercase ) _A = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) _A = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def __A ( _lowercase , _lowercase , _lowercase , _lowercase = False ): '''simple docstring''' _A = b'''''' _A ,_A = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) _A = 0 for raw in iterator: acc += raw if stream and len(_lowercase ) < chunk_len: _A = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(_lowercase ) >= chunk_len: # We are flushing the accumulator _A = (_stride_left, stride_right) _A = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: _A = False yield item _A = stride_left _A = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(_lowercase ) > stride_left: _A = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: _A = False yield item def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = 2**24 # 16Mo try: with subprocess.Popen(_lowercase , stdout=subprocess.PIPE , bufsize=_lowercase ) as ffmpeg_process: while True: _A = ffmpeg_process.stdout.read(_lowercase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
62
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope='''session''' ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _A = bytes(_lowercase , '''utf-8''' ) with zstd.open(_lowercase , '''wb''' ) as f: f.write(_lowercase ) return path @pytest.fixture def __A ( _lowercase ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f: f.write(_lowercase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _A = input_paths[compression_format] _A = tmp_path / '''cache''' _A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase ) _A = cached_path(_lowercase , download_config=_lowercase ) with open(_lowercase ) as f: _A = f.read() with open(_lowercase ) as f: _A = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = '''custom_cache''' _A = '''custom_extracted_dir''' _A = tmp_path / '''custom_extracted_path''' if default_extracted: _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase ) 
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) ) _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _A = xz_file _A = ( DownloadConfig(extract_compressed_file=_lowercase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase ) ) _A = cached_path(_lowercase , download_config=_lowercase ) assert Path(_lowercase ).parent.parts[-2:] == expected def __A ( _lowercase ): '''simple docstring''' _A = str(Path(_lowercase ).resolve() ) assert cached_path(_lowercase ) == text_file # relative path _A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_lowercase ) == text_file def __A ( _lowercase ): '''simple docstring''' _A = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_lowercase ): cached_path(_lowercase ) # relative path _A = '''./__missing_file__.txt''' with pytest.raises(_lowercase ): cached_path(_lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_lowercase ) as f: _A = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( ): '''simple docstring''' with pytest.raises(_lowercase ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): http_get('''https://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase ) 
with pytest.raises(_lowercase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ) def __A ( _lowercase ): '''simple docstring''' _A = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_lowercase ): fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase ) with pytest.raises(_lowercase ): fsspec_head('''s3://huggingface.co''' )
62
1
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version __A = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') __A = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization __A = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } __A = sorted(arg_to_scheduler.keys()) __A = '{' + ', '.join(arg_to_scheduler_choices) + '}' class SCREAMING_SNAKE_CASE ( pl.LightningModule ): """simple docstring""" def __init__( self: Union[str, Any] , __A: argparse.Namespace , __A: Optional[int]=None , __A: List[Any]="base" , __A: int=None , __A: str=None , __A: List[Any]=None , **__A: int , ) -> int: super().__init__() # TODO: move 
to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__A ) _A = 0 _A = Path(self.hparams.output_dir ) _A = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _A = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__A , **__A , ) else: _A = config _A = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , __A , __A ): assert hasattr(self.config , __A ), f"""model config doesn't have a `{p}` attribute""" setattr(self.config , __A , getattr(self.hparams , __A ) ) if tokenizer is None: _A = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__A , ) else: _A = tokenizer _A = MODEL_MODES[mode] if model is None: _A = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__A , ) else: _A = model def __A ( self: int , *__A: int , **__A: Optional[Any] ) -> Optional[int]: _A = self.model_type.from_pretrained(*__A , **__A ) def __A ( self: Optional[int] ) -> Optional[int]: _A = arg_to_scheduler[self.hparams.lr_scheduler] _A = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _A = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def __A ( self: str ) -> Tuple: _A = self.model _A = ['''bias''', '''LayerNorm.weight'''] _A = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { 
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _A = Adafactor( __A , lr=self.hparams.learning_rate , scale_parameter=__A , relative_step=__A ) else: _A = AdamW( __A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _A = optimizer _A = self.get_lr_scheduler() return [optimizer], [scheduler] def __A ( self: List[Any] , __A: Any , __A: Optional[Any] ) -> int: return self.validation_step(__A , __A ) def __A ( self: int , __A: Any ) -> Optional[int]: return self.validation_end(__A ) def __A ( self: Optional[int] ) -> int: _A = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _A = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def __A ( self: Union[str, Any] , __A: List[str] ) -> Optional[int]: if stage == "test": _A = len(self.test_dataloader().dataset ) else: _A = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__A ) _A = len(self.train_dataloader().dataset ) def __A ( self: int , __A: str , __A: int , __A: bool = False ) -> Tuple: raise NotImplementedError('''You must implement this for your task''' ) def __A ( self: Union[str, Any] ) -> str: return self.train_loader def __A ( self: Dict ) -> int: return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__A ) def __A ( self: Optional[Any] ) -> Optional[Any]: return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__A ) def __A ( self: Tuple , __A: List[Any] ) -> List[str]: return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( __A , list(filter(__A , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def __A ( self: Optional[Any] , __A: Dict[str, Any] ) -> None: _A = self.output_dir.joinpath('''best_tfmr''' ) _A = 
self.step_count self.model.save_pretrained(__A ) self.tokenizer.save_pretrained(__A ) @staticmethod def __A ( __A: Union[str, Any] , __A: Optional[Any] ) -> Dict: parser.add_argument( '''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=__A , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(__A ).parent / '''test_run''' / '''cache''' ) , type=__A , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=__A , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=__A , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=__A , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=__A , help='''Attention dropout probability (Optional). 
Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5e-5 , type=__A , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=__A , metavar=__A , type=__A , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__A , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__A , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=__A , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=__A , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__A ) parser.add_argument('''--train_batch_size''' , default=32 , type=__A ) parser.add_argument('''--eval_batch_size''' , default=32 , type=__A ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class SCREAMING_SNAKE_CASE ( pl.Callback ): """simple docstring""" def __A ( self: Optional[Any] , __A: str , __A: List[str] ) -> Union[str, Any]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class SCREAMING_SNAKE_CASE ( pl.Callback ): """simple docstring""" def __A ( self: str , __A: int , __A: List[str] ) -> List[Any]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__A ) class SCREAMING_SNAKE_CASE ( pl.Callback ): """simple docstring""" def __A ( self: int , __A: Union[str, Any] , __A: Any ) -> Optional[Any]: _A = trainer.lr_schedulers[0]['''scheduler'''] _A = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__A ) def __A ( self: str , __A: pl.Trainer , __A: pl.LightningModule ) -> Optional[Any]: rank_zero_info('''***** Validation results *****''' ) _A = trainer.callback_metrics # Log results for key in sorted(__A ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__A , str(metrics[key] ) ) ) def __A ( self: Tuple , __A: pl.Trainer , __A: pl.LightningModule ) -> Optional[Any]: rank_zero_info('''***** Test results *****''' ) _A = trainer.callback_metrics # Log and save results to file _A = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(__A , '''w''' ) as writer: for key in sorted(__A ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__A , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(__A , str(metrics[key] ) ) ) def __A ( _lowercase , _lowercase ): '''simple docstring''' parser.add_argument( '''--output_dir''' , default=str(Path(_lowercase ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_lowercase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_lowercase , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and 
\'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_lowercase ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_lowercase , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_lowercase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_lowercase , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_lowercase ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_lowercase , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=True , _lowercase=[] , _lowercase=None , _lowercase=None , **_lowercase , ): '''simple docstring''' pl.seed_everything(args.seed ) # init model _A = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowercase ) # add custom checkpoints if checkpoint_callback is None: _A = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowercase ) if logging_callback is None: _A = LoggingCallback() _A = {} if args.fpaa: _A = 16 if args.gpus > 1: _A = '''auto''' _A = '''ddp''' _A = args.accumulate_grad_batches _A = None _A = '''auto''' _A = pl.Trainer.from_argparse_args( _lowercase , weights_summary=_lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **_lowercase , ) if args.do_train: trainer.fit(_lowercase ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
62
import math


def __A(n: int) -> list[int]:
    """Return all primes up to and including *n* using a segmented sieve.

    A classic sieve of Eratosthenes is run up to ``sqrt(n)``; the base primes
    found there are then used to sieve successive segments of width
    ``sqrt(n)`` until *n* is reached, keeping memory usage at O(sqrt(n)).

    NOTE(review): the original defined this function as ``__A`` but the
    call site used the undefined name ``sieve`` (NameError); both names are
    now bound.  The original also printed the full sieve of 10**6 at import
    time; that is now behind a ``__main__`` guard.
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment

    # Plain sieve up to sqrt(n) to collect the base primes.
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve each segment [low, high] with the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` that is >= low (pure integer math;
            # the original used float `math.floor(low / each)`)
            t = (low // each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


# Public alias matching the original call site.
sieve = __A

if __name__ == "__main__":
    print(sieve(10**6))
62
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def __A() -> None:
    """Entry point for the ``accelerate`` command-line tool.

    Builds the top-level argument parser, registers every sub-command,
    parses ``sys.argv`` and dispatches to the chosen sub-command's handler.
    Exits with status 1 (after printing help) when no sub-command is given.
    """
    # allow_abbrev=False: do not silently accept abbreviated option names.
    # (The original passed the undefined name ``_lowercase`` here, which
    # raised a NameError before the parser could even be constructed.)
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


# The __main__ guard below calls ``main`` but the function was defined as
# ``__A``; keep both names bound so either entry point works.
main = __A

if __name__ == "__main__":
    main()
62
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: Tuple ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Dict , __A: Dict ) -> Tuple: _A ,_A ,_A ,_A = hidden_states.shape _A = jax.image.resize( __A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = jnp.floataa def __A ( self: List[str] ) -> Tuple: _A = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _A = self.conv(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" A_ = 42 A_ = None A_ = 0.0 A_ = None A_ = jnp.floataa def __A ( self: Dict ) -> Dict: _A = self.in_channels if self.out_channels is None else self.out_channels _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = nn.Dense(__A , dtype=self.dtype ) _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _A = nn.Dropout(self.dropout_prob ) _A = nn.Conv( __A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _A = None if use_nin_shortcut: _A = nn.Conv( __A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]: _A = hidden_states _A = self.norma(__A ) _A = 
nn.swish(__A ) _A = self.conva(__A ) _A = self.time_emb_proj(nn.swish(__A ) ) _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 ) _A = hidden_states + temb _A = self.norma(__A ) _A = nn.swish(__A ) _A = self.dropout(__A , __A ) _A = self.conva(__A ) if self.conv_shortcut is not None: _A = self.conv_shortcut(__A ) return hidden_states + residual
62
1
class SCREAMING_SNAKE_CASE:
    """Prefix-sum index over a list of integers.

    Precomputes cumulative sums once in O(n) so that any contiguous
    range sum can be answered in O(1), and "does some contiguous
    subarray sum to X?" can be answered in O(n).

    NOTE(review): in the original, both methods were named ``__A``, so the
    second definition shadowed the first and the range-sum query was
    unreachable; they are given distinct names here.
    """

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        # prefix_sum[i] == sum(array[0 : i + 1])
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return sum(array[start : end + 1]) in O(1) (indices inclusive)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to *target_sum*.

        Uses the identity: a subarray (i, j] sums to X iff
        prefix_sum[j] - X equals some earlier prefix sum.
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
def __A(graph: dict[int, list[int]]) -> None:
    """Print a topological ordering of *graph* using Kahn's algorithm.

    *graph* maps each vertex (0..n-1) to its list of successors.  Prints
    the ordering as a list, or ``Cycle exists`` when the graph is cyclic.

    NOTE(review): the original's parameter was named ``_lowercase`` while
    the body referenced the undefined name ``graph``, and the module level
    rebound ``__A`` to the example dict, clobbering this function; both
    issues are fixed here without changing the printed output.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with every source vertex (indegree 0).
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        # Removing `vertex` lowers the indegree of each successor.
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        # Some vertices were never freed: the graph contains a cycle.
        print("Cycle exists")
    else:
        print(topo)


# Public alias matching the original call site (and star-import friendly).
topological_sort = __A

# Adjacency List of Graph
example_graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(example_graph)
62
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = AutoencoderKL A_ = "sample" A_ = 1e-2 @property def __A ( self: int ) -> Union[str, Any]: _A = 4 _A = 3 _A = (32, 32) _A = floats_tensor((batch_size, num_channels) + sizes ).to(__A ) return {"sample": image} @property def __A ( self: Optional[Any] ) -> List[str]: return (3, 32, 32) @property def __A ( self: Tuple ) -> Optional[Any]: return (3, 32, 32) def __A ( self: str ) -> Any: _A = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } _A = self.dummy_input return init_dict, inputs_dict def __A ( self: Optional[Any] ) -> Optional[Any]: pass def __A ( self: List[str] ) -> str: pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def __A ( self: Any ) -> Tuple: # enable deterministic behavior for gradient checkpointing _A ,_A = self.prepare_init_args_and_inputs_for_common() _A = self.model_class(**__A ) model.to(__A ) assert not model.is_gradient_checkpointing and model.training _A = model(**__A ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _A = torch.randn_like(__A ) _A = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _A = self.model_class(**__A ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__A ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _A = model_a(**__A ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _A = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) _A = dict(model.named_parameters() ) _A = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def __A ( self: Optional[Any] ) -> Any: _A ,_A = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=__A ) self.assertIsNotNone(__A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(__A ) _A = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __A ( self: Dict ) -> Optional[Any]: _A = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) _A = model.to(__A ) model.eval() if torch_device == "mps": _A = torch.manual_seed(0 ) else: _A = torch.Generator(device=__A ).manual_seed(0 ) _A = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _A = image.to(__A ) with torch.no_grad(): _A = model(__A , sample_posterior=__A , generator=__A ).sample _A = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output 
slices are not the same for CPU and GPU. if torch_device == "mps": _A = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": _A = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: _A = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(__A , __A , rtol=1e-2 ) ) @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Any , __A: List[Any] , __A: Optional[Any] ) -> Any: return f"""gaussian_noise_s={seed}_shape={"_".join([str(__A ) for s in shape] )}.npy""" def __A ( self: List[Any] ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self: List[Any] , __A: Tuple=0 , __A: Any=(4, 3, 5_12, 5_12) , __A: Dict=False ) -> Union[str, Any]: _A = torch.floataa if fpaa else torch.floataa _A = torch.from_numpy(load_hf_numpy(self.get_file_format(__A , __A ) ) ).to(__A ).to(__A ) return image def __A ( self: str , __A: List[str]="CompVis/stable-diffusion-v1-4" , __A: int=False ) -> str: _A = '''fp16''' if fpaa else None _A = torch.floataa if fpaa else torch.floataa _A = AutoencoderKL.from_pretrained( __A , subfolder='''vae''' , torch_dtype=__A , revision=__A , ) model.to(__A ).eval() return model def __A ( self: Any , __A: str=0 ) -> Tuple: if torch_device == "mps": return torch.manual_seed(__A ) return torch.Generator(device=__A ).manual_seed(__A ) @parameterized.expand( [ # fmt: off [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] 
) def __A ( self: Union[str, Any] , __A: Any , __A: str , __A: Any ) -> Tuple: _A = self.get_sd_vae_model() _A = self.get_sd_image(__A ) _A = self.get_generator(__A ) with torch.no_grad(): _A = model(__A , generator=__A , sample_posterior=__A ).sample assert sample.shape == image.shape _A = sample[-1, -2:, -2:, :2].flatten().float().cpu() _A = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(__A , __A , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def __A ( self: Optional[int] , __A: Union[str, Any] , __A: int ) -> Tuple: _A = self.get_sd_vae_model(fpaa=__A ) _A = self.get_sd_image(__A , fpaa=__A ) _A = self.get_generator(__A ) with torch.no_grad(): _A = model(__A , generator=__A , sample_posterior=__A ).sample assert sample.shape == image.shape _A = sample[-1, -2:, :2, -2:].flatten().float().cpu() _A = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def __A ( self: List[str] , __A: str , __A: Tuple , __A: Optional[Any] ) -> Union[str, Any]: _A = self.get_sd_vae_model() _A = self.get_sd_image(__A ) with torch.no_grad(): _A = model(__A ).sample assert sample.shape == image.shape _A = sample[-1, -2:, -2:, :2].flatten().float().cpu() _A = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(__A , __A , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_051, 
-0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def __A ( self: str , __A: Tuple , __A: str ) -> str: _A = self.get_sd_vae_model() _A = self.get_sd_image(__A , shape=(3, 4, 64, 64) ) with torch.no_grad(): _A = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] _A = sample[-1, -2:, :2, -2:].flatten().cpu() _A = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def __A ( self: str , __A: int , __A: Optional[int] ) -> int: _A = self.get_sd_vae_model(fpaa=__A ) _A = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A ) with torch.no_grad(): _A = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] _A = sample[-1, -2:, :2, -2:].flatten().float().cpu() _A = torch.tensor(__A ) assert torch_all_close(__A , __A , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def __A ( self: Any , __A: Dict ) -> Union[str, Any]: _A = self.get_sd_vae_model(fpaa=__A ) _A = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A ) with torch.no_grad(): _A = model.decode(__A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _A = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__A , __A , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def __A ( self: Any , __A: Tuple ) -> Optional[Any]: _A = 
self.get_sd_vae_model() _A = self.get_sd_image(__A , shape=(3, 4, 64, 64) ) with torch.no_grad(): _A = model.decode(__A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _A = model.decode(__A ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__A , __A , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def __A ( self: str , __A: Dict , __A: Any ) -> Tuple: _A = self.get_sd_vae_model() _A = self.get_sd_image(__A ) _A = self.get_generator(__A ) with torch.no_grad(): _A = model.encode(__A ).latent_dist _A = dist.sample(generator=__A ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _A = sample[0, -1, -3:, -3:].flatten().cpu() _A = torch.tensor(__A ) _A = 3e-3 if torch_device != '''mps''' else 1e-2 assert torch_all_close(__A , __A , atol=__A )
62
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class SCREAMING_SNAKE_CASE(SchedulerMixin, ConfigMixin):
    """Fourth-order improved-PNDM (F-PNDM) scheduler.

    See https://arxiv.org/pdf/2202.09778.pdf, mainly formulas (9), (12),
    (13) and Algorithm 2.
    """

    order = 1

    @register_to_config
    def __init__(
        self: Any,
        num_train_timesteps: int = 1000,
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
    ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        self.pndm_order = 4

        # running values (history of epsilon estimates for the multistep rule)
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Build the discrete beta/alpha/timestep schedule for inference."""
        self.num_inference_steps = num_inference_steps

        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            # cosine-style schedule: beta^2 + alpha^2 == 1 by construction
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        # BUGFIX: original called torch.atana, which does not exist;
        # atan2(beta, alpha) recovers the angle of the (alpha, beta) pair.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Advance ``sample`` one step using the Adams-Bashforth multistep rule.

        Raises:
            ValueError: if ``set_timesteps`` has not been called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams-Bashforth coefficients of increasing order as history grows.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """No-op scaling; this scheduler does not rescale model inputs."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """One reverse-process update from the current index to the next."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # Guard against division by ~0 when alpha approaches zero.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
62
1
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Optional[Any] ) -> Optional[int]: _A = tempfile.mkdtemp() # fmt: off _A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _A = os.path.join(self.tmpdirname , __A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__A , __A ) def __A ( self: List[Any] , **__A: Any ) -> List[str]: return BertTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: int , **__A: List[str] ) -> Optional[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **__A ) def __A ( self: Union[str, Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __A ( self: Optional[int] ) -> Dict: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self: Any ) -> Dict: _A = 
self.get_tokenizer() _A = self.get_image_processor() _A = VisionTextDualEncoderProcessor(tokenizer=__A , image_processor=__A ) processor.save_pretrained(self.tmpdirname ) _A = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __A ) def __A ( self: Union[str, Any] ) -> int: _A = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__A , padding_value=1.0 ) _A = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=__A , image_processor=__A ) _A = self.prepare_image_inputs() _A = image_processor(__A , return_tensors='''np''' ) _A = processor(images=__A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self: int ) -> List[str]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=__A , 
image_processor=__A ) _A = '''lower newer''' _A = processor(text=__A ) _A = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self: Optional[int] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=__A , image_processor=__A ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__A , images=__A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(__A ): processor() def __A ( self: Union[str, Any] ) -> Union[str, Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=__A , image_processor=__A ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__A ) _A = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __A ( self: Optional[int] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=__A , image_processor=__A ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__A , images=__A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
62
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both bindings below use the name __A, so the logger is
# shadowed by the archive map — presumably these were `logger` and
# `DPR_PRETRAINED_CONFIG_ARCHIVE_MAP` before renaming; names kept to avoid
# changing the module's public surface.
__A = logging.get_logger(__name__)

__A = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}


class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) models.

    Mirrors the BERT configuration surface plus ``projection_dim`` for the
    optional output projection of the reader/encoders.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection layer.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
62
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: 
ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to 
instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
1
def euclidean_distance_sqr(point1, point2):
    """Return the squared Euclidean distance between two 2-D points.

    The square root is deliberately skipped: comparing squared distances
    gives the same ordering and avoids the sqrt cost in the hot loops.
    """
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(points, column=0):
    """Return *points* sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(points, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force minimum squared distance among the first *points_counts*
    points.  O(n^2); used as the base case of the divide-and-conquer.
    """
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the vertical strip around the
    dividing line.  Each point is compared with at most 6 neighbours,
    which suffices for strip candidates in the closest-pair algorithm.
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer core: smallest squared distance among the points.

    *points_sorted_on_x* / *points_sorted_on_y* are the full point set
    pre-sorted on each coordinate; *points_counts* is how many points the
    current recursion level considers.
    """
    # Base case: brute force for up to three points.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # Recurse on the two halves (split on the y-sorted list, as the
    # original implementation does).
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Collect the points lying close enough to the dividing line that a
    # cross-boundary pair could still beat the current best distance.
    cross_strip = [
        point
        for point in points_sorted_on_x
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis
    ]
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (real, non-squared) distance of the closest pair.

    >>> closest_pair_of_points([(0, 0), (3, 4)], 2)
    5.0
    """
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
62
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , 
decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there 
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) 
def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( 
self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, 
-1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
62
1
# Lazy import scaffolding for the TimeSformer model: the torch-backed
# modeling module is only imported when one of its names is first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; never executed at runtime.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that resolves attributes
    # from _import_structure on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    """Wraps the two tokenizers RAG needs: one for the question encoder and
    one for the generator.  ``current_tokenizer`` selects which of the two
    handles plain ``__call__`` invocations (input vs. target mode).
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Default to input (question-encoder) mode.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers into dedicated subfolders of *save_directory*."""
        if os.path.isfile(save_directory):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate both sub-tokenizers from the subfolders written by
        :meth:`save_pretrained`, resolving their configs from the RAG config
        when no explicit ``config`` kwarg is given.
        """
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Dispatch to whichever sub-tokenizer is active (input vs. target mode).
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Decoding always concerns generator output.
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated helper: tokenize sources (and optionally targets) in one
        call, storing target ids under ``model_inputs['labels']``.
        """
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''',
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
62
1
# Extract a subset of teacher layers from a full RobertaForMaskedLM or
# GPT2LMHeadModel checkpoint, remapping teacher layer indices [0,2,4,7,9,11]
# onto consecutive student indices, and dump the result for distillation.
# NOTE(review): the left-hand compressed_sd key names were destroyed by the
# obfuscation in the original; the teacher->student remapping below is a
# reconstruction — verify against the distillation training script.
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
62
def ceil_index(v, left, right, key):
    """Binary search on the sorted prefix of *v*: return the smallest index
    in the open-left interval (left, right] whose value is >= *key*.
    *left* may be -1, acting as an open lower bound.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v):
    """Length of the longest strictly increasing subsequence of *v*.

    Uses the O(n log n) tail-array technique: tail[i] holds the smallest
    possible tail value of an increasing subsequence of length i + 1.

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
    1
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest element: it starts a better length-1 subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail >= v[i] to keep tails as small as possible.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
1
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json __A = 'sshleifer/mar_enro_6_3_student' class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __A ( self: Tuple ) -> Dict: super().setUp() _A = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=__A , ) _A = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def __A ( self: Any ) -> List[Any]: MarianMTModel.from_pretrained(__A ) @slow @require_torch_gpu def __A ( self: Tuple ) -> Optional[Any]: _A = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script _A = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() _A = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): _A = bash_script.replace(__A , str(__A ) ) _A = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") _A = f""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future _A = ['''finetune.py'''] + 
bash_script.split() + args with patch.object(__A , '''argv''' , __A ): _A = argparse.ArgumentParser() _A = pl.Trainer.add_argparse_args(__A ) _A = SummarizationModule.add_model_specific_args(__A , os.getcwd() ) _A = parser.parse_args() _A = main(__A ) # Check metrics _A = load_json(model.metrics_save_path ) _A = metrics['''val'''][0] _A = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __A ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict _A = os.listdir(__A ) _A = [x for x in contents if x.endswith('''.ckpt''' )][0] _A = os.path.join(args.output_dir , __A ) _A = torch.load(__A , map_location='''cpu''' ) _A = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: _A = {os.path.basename(__A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" @timeout_decorator.timeout(6_00 ) @slow @require_torch_gpu def __A ( self: List[str] ) -> Union[str, Any]: _A = f"""{self.test_file_dir_str}/test_data/wmt_en_ro""" _A = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 1_28, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script _A = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) _A = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) _A = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): _A = bash_script.replace(__A , str(__A ) ) _A = self.get_auto_remove_tmp_dir() _A = bash_script.replace('''--fp16''' , '''''' ) _A = 6 _A = ( ['''distillation.py'''] + bash_script.split() + [ f"""--output_dir={output_dir}""", '''--gpus=1''', '''--learning_rate=1e-3''', f"""--num_train_epochs={epochs}""", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(__A , '''argv''' , __A ): _A = argparse.ArgumentParser() _A = pl.Trainer.add_argparse_args(__A ) _A = SummarizationDistiller.add_model_specific_args(__A , os.getcwd() ) _A = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu _A = distill_main(__A ) # Check metrics _A = load_json(model.metrics_save_path ) _A = metrics['''val'''][0] _A = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert 
first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __A ) # check lightning ckpt can be loaded and has a reasonable statedict _A = os.listdir(__A ) _A = [x for x in contents if x.endswith('''.ckpt''' )][0] _A = os.path.join(args.output_dir , __A ) _A = torch.load(__A , map_location='''cpu''' ) _A = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: _A = {os.path.basename(__A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
62
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors

# NOTE(review): this module appears to have been mechanically renamed — every local
# assignment target was replaced by `_A`, most parameters by `__A`, and the class/base
# names by placeholders.  Bodies therefore reference names (`hparams`, `loss`,
# `features`, `args`, `parser`, `main`, ...) that are never bound.  The comments below
# describe the evident intent; the original names must be restored before this runs.

# Module-level logger (original name was presumably `logger`, which the methods use).
__A = logging.getLogger(__name__)


class SCREAMING_SNAKE_CASE(snake_case):
    """PyTorch-Lightning module for GLUE sequence classification on top of BaseTransformer."""

    # Task mode consumed by BaseTransformer (selects the AutoModelFor* head).
    A_ = "sequence-classification"

    def __init__(self: str, __A: Union[str, Any]) -> List[str]:
        """Accept hparams as a Namespace or plain dict; derive GLUE output mode / label count."""
        if type(__A) == dict:
            # Allow passing a plain dict of hyperparameters.
            _A = Namespace(**__A)
        # Look up the task's output mode ("classification"/"regression") and label count.
        _A = glue_output_modes[hparams.task]  # NOTE(review): `hparams` never bound here
        _A = glue_tasks_num_labels[hparams.task]
        super().__init__(__A, __A, self.mode)

    def __A(self: Optional[Any], **__A: Union[str, Any]) -> Optional[int]:
        """Forward pass — delegate directly to the wrapped transformers model."""
        return self.model(**__A)

    def __A(self: Any, __A: Union[str, Any], __A: int) -> Optional[Any]:
        """Training step: build model inputs from the batch tuple and return loss + logs."""
        # Batch layout (from the cached TensorDataset below):
        # [input_ids, attention_mask, token_type_ids, labels]
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only these model families actually consume token_type_ids.
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A)
        _A = outputs[0]  # first output is the loss
        # Report the current learning rate alongside the loss.
        _A = self.trainer.lr_schedulers[0]['''scheduler''']
        _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def __A(self: List[str]) -> Dict:
        """Convert raw GLUE examples to features and cache them to disk (train and dev)."""
        _A = self.hparams
        _A = processors[args.task]()
        _A = processor.get_labels()
        for mode in ["train", "dev"]:
            _A = self._feature_file(__A)
            if os.path.exists(__A) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''', __A)
            else:
                logger.info('''Creating features from dataset file at %s''', args.data_dir)
                _A = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir)
                )
                _A = convert_examples_to_features(
                    __A,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('''Saving features into cached file %s''', __A)
                torch.save(__A, __A)

    def __A(self: List[str], __A: str, __A: int, __A: bool = False) -> DataLoader:
        """Build a DataLoader from the cached feature file for the given mode.

        NOTE(review): signature declares `__A` twice — duplicate parameter names are a
        SyntaxError; the original parameters were presumably (mode, batch_size, shuffle).
        """
        # The "test" split reuses the cached dev features.
        _A = '''dev''' if mode == '''test''' else mode
        _A = self._feature_file(__A)
        logger.info('''Loading features from cached file %s''', __A)
        _A = torch.load(__A)
        _A = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        _A = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        _A = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            _A = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            _A = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(__A, __A, __A, __A),
            batch_size=__A,
            shuffle=__A,
        )

    def __A(self: List[str], __A: str, __A: Tuple) -> str:
        """Validation step: return loss plus raw predictions and gold labels as numpy."""
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A)
        _A, _A = outputs[:2]  # (loss, logits)
        _A = logits.detach().cpu().numpy()
        _A = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __A(self: str, __A: Dict) -> tuple:
        """Aggregate per-step validation outputs and compute GLUE metrics."""
        _A = torch.stack([x['''val_loss'''] for x in outputs]).mean().detach().cpu().item()
        _A = np.concatenate([x['''pred'''] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            # Logits -> class ids.
            _A = np.argmax(__A, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            _A = np.squeeze(__A)
        _A = np.concatenate([x['''target'''] for x in outputs], axis=0)
        _A = [[] for _ in range(out_label_ids.shape[0])]
        _A = [[] for _ in range(out_label_ids.shape[0])]
        # Merge mean loss with task metrics (accuracy / F1 / correlation per GLUE task).
        _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task, __A, __A)}
        _A = dict(results.items())
        _A = results
        return ret, preds_list, out_label_list

    def __A(self: Any, __A: list) -> dict:
        """validation_epoch_end hook — expose the aggregated metrics to Lightning."""
        _A, _A, _A = self._eval_end(__A)
        _A = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __A(self: int, __A: Union[str, Any]) -> dict:
        """test_epoch_end hook — same aggregation as validation."""
        _A, _A, _A = self._eval_end(__A)
        _A = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __A(__A: Optional[Any], __A: Optional[Any]) -> Optional[Any]:
        """Add GLUE-specific CLI arguments on top of BaseTransformer's.

        NOTE(review): duplicate `__A` parameters (SyntaxError); originals were
        presumably (parser, root_dir).
        """
        BaseTransformer.add_model_specific_args(__A, __A)
        parser.add_argument(
            '''--max_seq_length''',
            default=1_28,
            type=__A,
            help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ),
        )
        parser.add_argument(
            '''--task''',
            default='''''',
            type=__A,
            required=__A,
            help='''The GLUE task to run''',
        )
        parser.add_argument(
            '''--gpus''',
            default=0,
            type=__A,
            help='''The number of GPUs allocated for this, it is by default 0 meaning none''',
        )
        parser.add_argument(
            '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets'''
        )
        return parser


def __A():
    '''CLI entry point: parse args, train, and optionally run test on the best checkpoint.'''
    _A = argparse.ArgumentParser()
    add_generic_args(_lowercase, os.getcwd())
    _A = GLUETransformer.add_model_specific_args(_lowercase, os.getcwd())
    _A = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _A = os.path.join(
            '''./results''',
            f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""",
        )
        os.makedirs(args.output_dir)
    _A = GLUETransformer(_lowercase)
    _A = generic_train(_lowercase, _lowercase)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Pick the most recent epoch checkpoint (lexicographic sort on epoch number).
        _A = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=_lowercase))
        _A = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(_lowercase)


if __name__ == "__main__":
    # NOTE(review): `main` is never defined — the entry function above was renamed to
    # `__A` by the same transformation; running this script raises NameError.
    main()
62
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer

# NOTE(review): module/local names were mechanically replaced by `__A`/`_A` — the five
# module-level constants below were presumably `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and
# `PRETRAINED_INIT_CONFIGURATION` (the class attributes reference exactly those names).
__A = logging.get_logger(__name__)

# File names used by the slow/fast tokenizer pair.
__A = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Hub URLs of the vocab files per pretrained checkpoint.
__A = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

# Maximum model input sizes (positional embedding capacity) per checkpoint.
__A = {
    'junnyu/roformer_chinese_small': 1536,
    'junnyu/roformer_chinese_base': 1536,
    'junnyu/roformer_chinese_char_small': 512,
    'junnyu/roformer_chinese_char_base': 512,
    'junnyu/roformer_small_discriminator': 128,
    'junnyu/roformer_small_generator': 128,
}

# Default init kwargs per checkpoint.
__A = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}


class SCREAMING_SNAKE_CASE(snake_case):
    """Fast RoFormer tokenizer: BERT-style wordpiece with a custom Jieba pre-tokenizer."""

    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = PRETRAINED_INIT_CONFIGURATION
    # Slow-tokenizer counterpart used for conversion.
    A_ = RoFormerTokenizer

    def __init__(self: Any, __A: Union[str, Any]=None, __A: Dict=None, __A: Dict=True, __A: Union[str, Any]="[UNK]", __A: List[Any]="[SEP]", __A: List[str]="[PAD]", __A: List[str]="[CLS]", __A: List[str]="[MASK]", __A: int=True, __A: List[Any]=None, **__A: int, ) -> int:
        """Build the fast tokenizer; re-create the normalizer if lowercase/strip_accents
        settings in the serialized backend disagree with the requested ones.

        NOTE(review): duplicate `__A` parameter names are a SyntaxError — the original
        signature was (vocab_file, tokenizer_file, do_lower_case, unk/sep/pad/cls/mask
        tokens, tokenize_chinese_chars, strip_accents).
        """
        super().__init__(
            __A,
            tokenizer_file=__A,
            do_lower_case=__A,
            unk_token=__A,
            sep_token=__A,
            pad_token=__A,
            cls_token=__A,
            mask_token=__A,
            tokenize_chinese_chars=__A,
            strip_accents=__A,
            **__A,
        )
        # Inspect the serialized normalizer state of the backend tokenizer.
        _A = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get('''lowercase''', __A) != do_lower_case
            or pre_tok_state.get('''strip_accents''', __A) != strip_accents
        ):
            # Rebuild the normalizer class with the caller's settings.
            _A = getattr(__A, pre_tok_state.pop('''type'''))
            _A = do_lower_case
            _A = strip_accents
            _A = pre_tok_class(**__A)
        _A = do_lower_case

    def __getstate__(self: Union[str, Any]) -> str:
        """Swap in a picklable BertPreTokenizer — the custom Jieba one can't be pickled."""
        _A = self.__dict__.copy()
        _A = BertPreTokenizer()
        return state

    def __setstate__(self: Union[str, Any], __A: Union[str, Any]) -> Optional[int]:
        """Restore state and re-attach the custom Jieba pre-tokenizer after unpickling."""
        _A = d
        _A = self.__dict__['''_tokenizer'''].get_vocab()
        _A = PreTokenizer.custom(JiebaPreTokenizer(__A))

    def __A(self: str, __A: str, __A: str=None) -> int:
        """Build model inputs: [CLS] A [SEP] (+ B [SEP] when a pair is given).

        NOTE(review): duplicate `__A` parameters (SyntaxError in the original).
        """
        _A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __A(self: Tuple, __A: List[int], __A: Optional[List[int]] = None) -> List[int]:
        """Token-type ids: 0s for the first sequence (incl. specials), 1s for the second."""
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def __A(self: List[Any], __A: str, __A: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files into the given directory."""
        _A = self._tokenizer.model.save(__A, name=__A)
        return tuple(__A)

    def __A(self: List[str], __A: int, __A: List[Any]=None, __A: Union[str, Any]=None, __A: List[str]=False, **__A: Optional[Any], ) -> Dict:
        """Temporarily restore a serializable pre-tokenizer, then delegate to save_pretrained."""
        _A = BertPreTokenizer()
        return super().save_pretrained(__A, __A, __A, __A, **__A)
62
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __A ( _lowercase = "" ): '''simple docstring''' _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' ) _A = soup.find_all('''td''' , attrs='''titleColumn''' ) _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase ) } def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ): '''simple docstring''' _A = get_imdb_top_aaa_movies() with open(_lowercase , '''w''' , newline='''''' ) as out_file: _A = csv.writer(_lowercase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
1
from __future__ import annotations

# NOTE(review): this class was mechanically renamed — the class (referenced internally
# as `Matrix`), most method names (`columns`, `num_rows`, `num_columns`, `order`,
# `is_square`, `identity`, `determinant`, `is_invertable`, `get_minor`, `get_cofactor`,
# `minors`, `cofactors`, `adjugate`, `inverse`, `add_row`, `add_column`, `dot_product`)
# and all local variables were replaced by placeholders, and several signatures declare
# `__A` twice (a SyntaxError).  Comments document the evident intent.


class SCREAMING_SNAKE_CASE:
    """An integer/float matrix supporting arithmetic, determinants and inversion."""

    def __init__(self: Tuple, __A: list[list[int]]) -> Dict:
        """Validate and store `rows`; an empty list yields the 0x0 matrix."""
        _A = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.'''
        )
        if len(__A) != 0:
            _A = len(rows[0])  # expected column count, taken from the first row
            if cols == 0:
                raise error
            for row in rows:
                # All rows must be the same length...
                if len(__A) != cols:
                    raise error
                # ...and contain only numbers.
                for value in row:
                    if not isinstance(__A, (int, float)):
                        raise error
            _A = rows
        else:
            _A = []

    def __A(self: Any) -> list[list[int]]:
        """Return the columns of the matrix (i.e. the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def __A(self: int) -> int:
        # number of rows
        return len(self.rows)

    @property
    def __A(self: int) -> int:
        # number of columns
        return len(self.rows[0])

    @property
    def __A(self: List[str]) -> tuple[int, int]:
        # (rows, columns) pair
        return (self.num_rows, self.num_columns)

    @property
    def __A(self: Any) -> bool:
        # square iff row count equals column count
        return self.order[0] == self.order[1]

    def __A(self: int) -> Matrix:
        """Return the identity matrix of the same order."""
        _A = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(__A)

    def __A(self: Tuple) -> int:
        """Determinant: closed forms up to 2x2, then Laplace expansion on row 0."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def __A(self: str) -> bool:
        """A matrix is invertible iff its determinant is non-zero."""
        return bool(self.determinant())

    def __A(self: List[Any], __A: int, __A: int) -> int:
        """Minor: determinant of the submatrix with the given row and column removed."""
        _A = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(__A).determinant()

    def __A(self: Optional[Any], __A: int, __A: int) -> int:
        """Cofactor: minor with sign (-1)^(row+column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(__A, __A)
        return -1 * self.get_minor(__A, __A)

    def __A(self: List[Any]) -> Matrix:
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(__A, __A) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def __A(self: List[Any]) -> Matrix:
        """Matrix of cofactors (minors with checkerboard signs applied)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def __A(self: Tuple) -> Matrix:
        """Adjugate: transpose of the cofactor matrix."""
        _A = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(__A)

    def __A(self: List[str]) -> Matrix:
        """Inverse via adjugate / determinant; raises for singular matrices."""
        _A = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
        return self.adjugate() * (1 / determinant)

    def __repr__(self: List[str]) -> str:
        return str(self.rows)

    def __str__(self: Any) -> str:
        """Human-readable rendering, one row per line."""
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(__A) for value in row]) + '''.]'''
                    for row in self.rows
                ]
            )
            + "]"
        )

    def __A(self: Optional[Any], __A: list[int], __A: int | None = None) -> None:
        """Append (or insert at `position`) a new row after validating it."""
        _A = TypeError('''Row must be a list containing all ints and/or floats''')
        if not isinstance(__A, __A):
            raise type_error
        for value in row:
            if not isinstance(__A, (int, float)):
                raise type_error
        if len(__A) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''')
        if position is None:
            self.rows.append(__A)
        else:
            _A = self.rows[0:position] + [row] + self.rows[position:]

    def __A(self: Any, __A: list[int], __A: int | None = None) -> None:
        """Append (or insert at `position`) a new column after validating it."""
        _A = TypeError(
            '''Column must be a list containing all ints and/or floats''')
        if not isinstance(__A, __A):
            raise type_error
        for value in column:
            if not isinstance(__A, (int, float)):
                raise type_error
        if len(__A) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''')
        if position is None:
            _A = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            _A = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self: Union[str, Any], __A: object) -> bool:
        """Equality is element-wise equality of the stored rows."""
        if not isinstance(__A, __A):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self: Optional[int], __A: object) -> bool:
        return not self == other

    def __neg__(self: Dict) -> Matrix:
        # Negation is scalar multiplication by -1.
        return self * -1

    def __add__(self: Union[str, Any], __A: Matrix) -> Matrix:
        """Element-wise addition; both operands must have the same order."""
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self: Union[str, Any], __A: Matrix) -> Matrix:
        """Element-wise subtraction; both operands must have the same order."""
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self: Optional[Any], __A: Matrix | int | float) -> Matrix:
        """Scalar multiplication (note: results are truncated by int()) or matrix product."""
        if isinstance(__A, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(__A, __A):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''')
            # row-by-column dot products
            return Matrix(
                [
                    [Matrix.dot_product(__A, __A) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''')

    def __pow__(self: Optional[Any], __A: int) -> Matrix:
        """Integer power by repeated multiplication; negative powers use the inverse."""
        if not isinstance(__A, __A):
            raise TypeError('''A Matrix can only be raised to the power of an int''')
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power''')
        _A = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def __A(cls: Dict, __A: list[int], __A: list[int]) -> int:
        """Dot product of a row and a column of equal length."""
        return sum(row[i] * column[i] for i in range(len(__A)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin

# NOTE(review): method names were mechanically replaced by `__A`, so later defs shadow
# earlier ones on the class; originals were presumably setUp / get_tokenizer /
# get_input_output_texts / test_full_blenderbot_small_tokenizer / test_special_tokens /
# an empty-word edge-case test.  Local assignment targets were replaced by `_A`.


class SCREAMING_SNAKE_CASE(snake_case, unittest.TestCase):
    """Unit tests for BlenderbotSmallTokenizer (BPE with @@ continuation markers)."""

    A_ = BlenderbotSmallTokenizer
    A_ = False  # presumably: no fast (rust) tokenizer to test — TODO confirm

    def __A(self: List[str]) -> int:
        """Write a tiny vocab + merges fixture into the test tmp dir."""
        super().setUp()
        _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        _A = dict(zip(__A, range(len(__A))))  # token -> id
        _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        _A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        _A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(__A) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(__A))

    def __A(self: str, **__A: Optional[Any]) -> Dict:
        """Instantiate a tokenizer from the fixture dir with the special-token map."""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **__A)

    def __A(self: str, __A: List[str]) -> int:
        """Round-trip sample: identical input/output text for the common test mixin."""
        _A = '''adapt act apte'''
        _A = '''adapt act apte'''
        return input_text, output_text

    def __A(self: Union[str, Any]) -> Any:
        """Tokenization splits 'apte' into 'ap@@' + 'te'; ids follow the fixture vocab."""
        _A = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        _A = '''adapt act apte'''
        _A = ['''adapt''', '''act''', '''ap@@''', '''te''']
        _A = tokenizer.tokenize(__A)
        self.assertListEqual(__A, __A)
        _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        _A = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A), __A)

    def __A(self: Any) -> List[str]:
        """Integration check against the published facebook/blenderbot-90M checkpoint."""
        _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        assert tok('''sam''').input_ids == [13_84]
        _A = '''I am a small frog.'''
        _A = tok([src_text], padding=__A, truncation=__A)['''input_ids''']
        # Decoding lowercases and re-spaces punctuation, so it never round-trips exactly.
        _A = tok.batch_decode(__A, skip_special_tokens=__A, clean_up_tokenization_spaces=__A)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def __A(self: Any) -> int:
        """A trailing ' .' must encode to the same id as '.' alone."""
        _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        _A = '''I am a small frog .'''
        _A = '''.'''
        _A = tok(__A)['''input_ids''']
        _A = tok(__A)['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
62
1
# Ideal gas law: P·V = n·R·T, so P = nRT/V and V = nRT/P.
# FIX: the constant was bound to `__A` while both function bodies read
# `UNIVERSAL_GAS_CONSTANT` (NameError); both functions were named `__A`
# (the second shadowed the first); and each signature declared the same
# parameter `_lowercase` three times, which is a SyntaxError.
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        volume: volume V (SI units presumed, m^3, given R in J mol^-1 K^-1 — TODO confirm).

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume occupied by an ideal gas: V = nRT / P.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        pressure: pressure P (SI units presumed, Pa — TODO confirm).

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
62
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# NOTE(review): names were mechanically replaced — the two module constants below were
# presumably `logger` and `ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP`, and every parameter
# of __init__ became `__A` (duplicate parameter names are a SyntaxError as written).
__A = logging.get_logger(__name__)

# Hub URLs of the config.json per pretrained RoBERTa checkpoint.
__A = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class SCREAMING_SNAKE_CASE(snake_case):
    """Configuration class for RoBERTa models (BERT architecture, different tokenizer/pretraining)."""

    # model_type key used for auto-class resolution.
    A_ = "roberta"

    def __init__(self: Dict, __A: int=5_02_65, __A: Union[str, Any]=7_68, __A: Union[str, Any]=12, __A: str=12, __A: int=30_72, __A: str="gelu", __A: Union[str, Any]=0.1, __A: int=0.1, __A: Optional[int]=5_12, __A: Union[str, Any]=2, __A: str=0.02, __A: str=1e-12, __A: Any=1, __A: str=0, __A: Any=2, __A: Optional[int]="absolute", __A: Optional[Any]=True, __A: Union[str, Any]=None, **__A: List[str], ) -> Dict:
        """Store the transformer hyperparameters.

        NOTE(review): the defaults correspond, in order, to vocab_size=50265,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden/attention dropout=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, pad/bos/eos token ids 1/0/2,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None —
        presumed from the attribute names read below; restore before use.
        """
        super().__init__(pad_token_id=__A, bos_token_id=__A, eos_token_id=__A, **__A)
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = hidden_act
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = initializer_range
        _A = layer_norm_eps
        _A = position_embedding_type
        _A = use_cache
        _A = classifier_dropout


class SCREAMING_SNAKE_CASE(snake_case):
    """ONNX export configuration for RoBERTa — declares the dynamic input axes."""

    @property
    def __A(self: Dict) -> Mapping[str, Mapping[int, str]]:
        """Map each model input to its dynamic (variable-size) axes for ONNX export."""
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            _A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            _A = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
62
1
def solution(length: int = 50) -> int:
    """Count the ways to replace tiles in a row of `length` black squares with
    coloured tiles of length 2, 3 or 4 (one tile size per row, at least one tile,
    tiles may not mix sizes in a single counted arrangement) — Project-Euler-116
    style counting.

    FIX: the function was named `__A` while the ``__main__`` guard calls
    ``solution()``; the parameter was named `_lowercase` while the body reads
    ``length``; and the DP table was assigned to `_A` while the body reads
    ``different_colour_ways_number`` — all NameErrors, now reconciled.

    Args:
        length: number of unit squares in the row.

    Returns:
        Total number of arrangements, summed over the three tile sizes.
    """
    # different_colour_ways_number[n][t - 2] = number of ways to tile a row of
    # length n using (at least one) tile of length t, for t in {2, 3, 4}.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Place the FIRST tile at offset `tile_start`; the remainder of the
                # row (after the tile) contributes its own count, plus 1 for the
                # arrangement with no further tiles.
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f'{solution() = }')
62
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput

# NOTE(review): local names were mechanically replaced by `_A`/`__A` — bodies read
# names (`eval_examples`, `calib_dataloader`, `metrics`, `axes`, ...) that are never
# bound, and several signatures declare `__A` more than once (a SyntaxError).
# The comments below record the evident intent of each method.

# Module-level logger (presumably originally `logger`, which the methods use).
__A = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class SCREAMING_SNAKE_CASE(snake_case):
    """Trainer subclass for quantization-aware question answering:
    adds calibration, post-processed evaluate/predict, and ONNX export."""

    def __init__(self: int, *__A: str, __A: List[Any]=None, __A: Union[str, Any]=None, __A: List[Any]=None, **__A: int) -> List[Any]:
        """Store QA-specific extras: eval examples, the post-processing function that
        converts model logits to answers, and the quant_trainer arguments."""
        super().__init__(*__A, **__A)
        _A = eval_examples
        _A = post_process_function
        _A = quant_trainer_args
        _A = 1_28  # default number of calibration samples

    def __A(self: Union[str, Any], __A: List[Any]=None) -> Optional[Any]:
        """Build a DataLoader over the calibration dataset (eval settings, no shuffle)."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''')
        _A = calib_dataset if calib_dataset is not None else self.calib_dataset
        # Drop columns the model's forward() does not accept.
        _A = self._remove_unused_columns(__A, description='''Calibration''')
        return DataLoader(
            __A,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=__A,
        )

    def __A(self: List[Any], __A: Any=None) -> Optional[int]:
        """Run post-training-quantization calibration: feed up to `calib_num` samples
        through the model with quantizer calibration enabled."""
        _A = self.train_dataset if calib_dataset is None else calib_dataset
        _A = self.get_calib_dataloader(__A)
        _A = self.model
        quant_trainer.configure_model(__A, self.quant_trainer_args, calib=__A)
        model.eval()
        quant_trainer.enable_calibration(__A)
        logger.info('''***** Running calibration *****''')
        logger.info(f""" Num examples = {self.calib_num}""")
        logger.info(f""" Batch size = {calib_dataloader.batch_size}""")
        for step, inputs in enumerate(__A):
            # Prediction step
            _A, _A, _A = self.prediction_step(__A, __A, prediction_loss_only=__A)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(__A, self.quant_trainer_args)
        _A = model

    def __A(self: Any, __A: Dict=None, __A: Tuple=None, __A: List[Any]=None, __A: str = "eval") -> int:
        """Evaluate: run the prediction loop, then post-process logits into answers
        and compute metrics (all keys prefixed with the metric_key_prefix)."""
        _A = self.eval_dataset if eval_dataset is None else eval_dataset
        _A = self.get_eval_dataloader(__A)
        _A = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        _A = self.compute_metrics
        _A = None
        _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _A = eval_loop(
                __A,
                description='''Evaluation''',
                # No point gathering the predictions if there are no metrics, otherwise
                # we defer to self.args.prediction_loss_only.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=__A,
            )
        finally:
            # Always restore the metric function, even if the loop raised.
            _A = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            _A = self.post_process_function(__A, __A, output.predictions)
            _A = self.compute_metrics(__A)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    _A = metrics.pop(__A)
            self.log(__A)
        else:
            _A = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        _A = self.callback_handler.on_evaluate(self.args, self.state, self.control, __A)
        return metrics

    def __A(self: Union[str, Any], __A: Optional[int], __A: int, __A: List[Any]=None, __A: str = "test") -> Union[str, Any]:
        """Predict on a test dataset; returns a PredictionOutput with post-processed
        predictions and prefixed metrics."""
        _A = self.get_test_dataloader(__A)
        # Temporarily disable metric computation, we will do it in the loop here.
        _A = self.compute_metrics
        _A = None
        _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _A = eval_loop(
                __A,
                description='''Prediction''',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=__A,
            )
        finally:
            _A = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        _A = self.post_process_function(__A, __A, output.predictions, '''predict''')
        _A = self.compute_metrics(__A)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                _A = metrics.pop(__A)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=__A)

    def __A(self: Tuple, __A: Optional[Any]="./") -> List[str]:
        """Export the (quantized) model to ONNX using one eval batch as sample input."""
        _A = self.eval_dataset
        _A = self.get_eval_dataloader(__A)
        _A = next(iter(__A))
        # saving device - to make it consistent
        _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
        # convert to tuple
        _A = tuple(v.to(__A) for k, v in batch.items())
        logger.info('''Converting model to be onnx compatible''')
        from pytorch_quantization.nn import TensorQuantizer
        # Use fake (simulated) quantization so the graph is exportable.
        _A = True
        _A = self.model.to(__A)
        model.eval()
        model.float()
        _A = model.module if hasattr(__A, '''module''') else model
        quant_trainer.configure_model(__A, self.quant_trainer_args)
        _A = os.path.join(__A, '''model.onnx''')
        logger.info(f"""exporting model to {output_model_file}""")
        # batch and sequence dimensions are dynamic
        _A = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            __A,
            __A,
            __A,
            export_params=__A,
            opset_version=13,
            do_constant_folding=__A,
            input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''],
            output_names=['''output_start_logits''', '''output_end_logits'''],
            dynamic_axes={
                '''input_ids''': axes,
                '''attention_mask''': axes,
                '''token_type_ids''': axes,
                '''output_start_logits''': axes,
                '''output_end_logits''': axes,
            },
            verbose=__A,
        )
        logger.info('''onnx export finished''')
62
1
"""Streamlit demo: Long-Form Question Answering with ELI5 + Wikipedia retrieval.

NOTE(review): this file was recovered from an obfuscated dump in which every
definition had been renamed; function/variable names below were restored from
their call sites (e.g. `load_models`, `qa_s2s_generate`) — confirm against the
original `eli5_utils` module before relying on exact signatures.
"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch

# Project-local helpers (retrieval + seq2seq generation utilities).
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question/answer retriever and the seq2seq answer generator.

    Returns (qar_tokenizer, qar_model, sas_tokenizer, sas_model); the retriever
    pair is (None, None) when the dense index is disabled.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages + dense FAISS index (GPU) and the ES client."""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question reps."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    """Return the `n_results` most similar ELI5 training examples to `question`."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages and build the seq2seq input document.

    Returns (question_doc, support_list) where support_list is a list of
    (article_title, section_title, score, passage_text) tuples.
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for `question_doc` with the seq2seq model."""
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE(review): `support_list` is a module-level name set in the UI code
    # below — this cross-reference existed in the original app; verify intent.
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = (
    '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n'
    ' padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n'
    ' <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n'
    % (header_html,)
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = (
    "\nThis demo presents a model trained to [provide long-form answers to open-domain questions]"
    "(https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia"
    " passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na"
    " pre-processed fixed snapshot of Wikipedia.\n"
)
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = (
        "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the"
        " **dense** retriever uses max-inner-product search between a question and passage embedding\n trained"
        " using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then"
        " generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    )
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults; overridden by the sidebar controls below.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = (
        "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with"
        " [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and"
        " retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample**"
        " from the decoder's output probabilities.\n "
    )
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Merge dense and sparse hits, de-duplicated, capped at 10 passages.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = (
    "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights"
    " into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual"
    " generations are still very much open research problems.\nTherefore, until some significant progress is"
    " achieved, we caution against using the generated answers for practical purposes.*\n"
)
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
62
# Lazy-import init for the MEGA model. The obfuscated dump assigned the import
# structure to a throwaway name while `_LazyModule` received the undefined
# `_import_structure` (NameError); restored the standard transformers pattern.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
1
"""SageMaker-specific `TrainingArguments` (deprecated in favor of `TrainingArguments`)."""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    """Return True iff SageMaker model parallelism is configured AND `smdistributed` is installed.

    Checks the SageMaker env vars for a "partitions" entry and the
    `sagemaker_mpi_enabled` flag; malformed JSON counts as "not available".
    """
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    # Extra hyperparameter slot populated by the SageMaker launcher.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device for this process (SMP / SMDDP / DDP / single)."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just
            # checking we're not at the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        # Under model parallelism the data-parallel group size is the world size.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # SMP handles device placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
62
"""Playfair cipher (https://en.wikipedia.org/wiki/Playfair_cipher).

Restored helper names (`chunker`, `prepare_input`, `generate_table`) that the
obfuscated dump had collapsed to a single identifier, breaking all call sites.
"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq` (last chunk may be shorter)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, separate doubled letters with 'X', pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair cannot encode a doubled digram; split it with an 'X'.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key square (as a flat 25-char list, I/J merged)."""
    # I and J are merged, hence the 25-letter alphabet without J.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair `ciphertext` under `key` (inverse shifts of `encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
62
1
# Lazy-import init for the MEGA model (duplicate copy in this dump). As dumped,
# `_LazyModule` received the undefined name `_import_structure` (NameError) and
# the torch-only entries / `sys.modules` assignment were lost; restored here.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
62
"""Tests for the CTRL model.

NOTE(review): recovered from an obfuscated dump in which every method was named
`__A` (later defs shadowed earlier ones); method/attribute names restored from
the transformers test-suite conventions — confirm against upstream.
"""
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs and synthetic inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as padding in these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
62
1
from random import randint from tempfile import TemporaryFile import numpy as np def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = 0 if start < end: _A = randint(_lowercase , _lowercase ) _A = a[end] _A = a[pivot] _A = temp _A ,_A = _in_place_partition(_lowercase , _lowercase , _lowercase ) count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 ) count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase ) return count def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = 0 _A = randint(_lowercase , _lowercase ) _A = a[end] _A = a[pivot] _A = temp _A = start - 1 for index in range(_lowercase , _lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _A = new_pivot_index + 1 _A = a[new_pivot_index] _A = a[index] _A = temp _A = a[new_pivot_index + 1] _A = a[end] _A = temp return new_pivot_index + 1, count __A = TemporaryFile() __A = 100 # 1000 elements are to be sorted __A , __A = 0, 1 # mean and standard deviation __A = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array __A = np.load(outfile) __A = len(M) - 1 __A = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
62
"""Kosaraju's algorithm for strongly connected components.

Restored `topology_sort` / `find_components` names that the obfuscated dump
had collapsed to one identifier, breaking the recursive calls.
"""

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS from `vert`, returning vertices in post-order (finish time order)."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)

    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS in the reversed graph from `vert`; the reachable set is one SCC."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the SCCs of `graph` (vertices 0..n-1) via Kosaraju's two passes."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph (all edges reversed).
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # First pass: record finish order on the original graph.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # Second pass: peel SCCs off the transpose in reverse finish order.
    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
62
1
# Back-compat shim: `generation_tf_utils` moved to `generation`. The dump left
# the class with an undefined base (`snake_case`) and warning category;
# restored the standard deprecation-shim pattern.
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time for this location; re-exports the real mixin
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
62
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack using the global table ``f``.

    ``i`` is the number of items considered, ``j`` the remaining capacity.
    (Fix: the mangled defs all shared one name, so the recursion and the
    call sites below raised NameError.)
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # -1 marks "not computed yet"
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns ``(optimal_value, dp_table)`` for capacity ``w`` over ``n`` items.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Fix: index with the capacity `w` itself rather than relying on the
    # leaked loop variable `w_` after the loop ends.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal item subset.

    Returns ``(optimal_value, set_of_1_based_item_indices)``.
    Raises ValueError/TypeError on malformed input.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding each taken item's 1-based index.

    An item i is part of an optimal subset at capacity j iff dp[i][j]
    differs from dp[i-1][j].
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
62
1
from ...processing_utils import ProcessorMixin


class SCREAMING_SNAKE_CASE(snake_case):
    """Processor bundling a Whisper feature extractor and tokenizer behind one
    callable API.

    NOTE(review): identifiers in this file were machine-mangled
    (SCREAMING_SNAKE_CASE / __A / _A); comments describe the apparent intent,
    the tokens themselves are left untouched.
    """

    A_ = "WhisperFeatureExtractor"
    A_ = "WhisperTokenizer"

    def __init__(self: Tuple, __A: int, __A: int) -> List[str]:
        # Register both sub-components with the ProcessorMixin base, then
        # default routing of __call__ to the feature extractor.
        super().__init__(__A, __A)
        _A = self.feature_extractor
        _A = False

    def __A(self: int, __A: Optional[int] = None, __A: Optional[Any] = None, __A: Optional[int] = True) -> List[Any]:
        # Forward decoder-prompt construction to the tokenizer.
        return self.tokenizer.get_decoder_prompt_ids(task=__A, language=__A, no_timestamps=__A)

    def __call__(self: Optional[Any], *__A: Optional[int], **__A: Optional[Any]) -> Optional[int]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*__A, **__A)

        # Pull audio/text/sampling_rate out of kwargs (or leading positionals).
        _A = kwargs.pop('''audio''', __A)
        _A = kwargs.pop('''sampling_rate''', __A)
        _A = kwargs.pop('''text''', __A)
        if len(__A) > 0:
            _A = args[0]
            _A = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')

        if audio is not None:
            # Extract input features from the raw audio.
            _A = self.feature_extractor(__A, *__A, sampling_rate=__A, **__A)
        if text is not None:
            _A = self.tokenizer(__A, **__A)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both given: attach the token ids as labels on the audio inputs.
            _A = encodings['''input_ids''']
            return inputs

    def __A(self: Any, *__A: Optional[Any], **__A: Any) -> Any:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*__A, **__A)

    def __A(self: Union[str, Any], *__A: Dict, **__A: str) -> Any:
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*__A, **__A)

    def __A(self: int, __A: str, __A: Tuple = "np") -> int:
        # Build prompt token ids in the requested tensor format.
        return self.tokenizer.get_prompt_ids(__A, return_tensors=__A)
62
def solution(n=1_000_000):
    """Project Euler 14: return the starting number below ``n`` that produces
    the longest Collatz chain.

    Chain lengths of previously seen starting numbers are cached in
    ``counters`` so each new start is only expanded until it hits a known
    value. (Fix: the mangled def was named ``__A`` while the __main__ block
    calls ``solution``, which raised NameError.)
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, n):
        counter = 0
        number = start

        while True:
            if number in counters:
                # Reuse the cached tail length.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
62
1
from math import sqrt


def is_prime(number):
    """Return True iff ``number`` (int, >= 0) is a prime number."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    # Trial division up to sqrt(number).
    for divisor in range(2, int(round(sqrt(number))) + 1):
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: return all primes in [2, n] (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    begin_list = list(range(2, n + 1))

    # Cross out every multiple by zeroing it in place.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    ans = [x for x in begin_list if x != 0]

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Return all primes in [2, n] via repeated primality tests (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of ``number`` as an ascending list."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and quotient % factor == 0:
                ans.append(factor)
                # FIX: floor division — true division ('/') turned the
                # quotient into a float, risking precision loss for large n.
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return the first pair of primes whose sum is the even ``number`` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    i = 0
    j = None
    loop = True  # exit flag for both loops once a pair is found

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Euclidean algorithm: gcd of two non-negative integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple ('kleinstes gemeinsames Vielfaches')."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers in both factorizations

    # iterate through prime_fac_1, taking the max multiplicity of shared primes
    for p in prime_fac_1:
        if p not in done:
            if p in prime_fac_2:
                count1 = prime_fac_1.count(p)
                count2 = prime_fac_2.count(p)
                for _ in range(max(count1, count2)):
                    ans *= p
            else:
                count1 = prime_fac_1.count(p)
                for _ in range(count1):
                    ans *= p
            done.append(p)

    # iterate through prime_fac_2 for primes not seen yet
    for p in prime_fac_2:
        if p not in done:
            count2 = prime_fac_2.count(p)
            for _ in range(count2):
                ans *= p
            done.append(p)

    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime, 0-based (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number
        while not is_prime(ans):
            ans += 1

    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []

    # fetch the first prime after p_number_1
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition: 'ans' contains not 'pNumber1' and 'pNumber2'!
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    return ans


def get_divisors(n):
    """Return all divisors of n (including 1 and n), n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator by their gcd; return the reduced tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Iterative factorial of n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Return the n-th term of this module's Fibonacci indexing
    (fib(0) == fib(1) == 1, fib(2) == 2, ...)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
62
def text_justification(word, max_width):
    """Greedy full text justification (LeetCode 68 style).

    Splits the sentence ``word`` into lines of exactly ``max_width``
    characters, distributing extra spaces round-robin from the left; the
    last line is left-justified. (Fix: the mangled original declared two
    parameters with the same name, a SyntaxError.)

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line, width, max_width) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    # last line is left-justified and padded on the right
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
62
1
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

# NOTE(review): identifiers in this file were machine-mangled (__A / _A /
# SCREAMING_SNAKE_CASE); several defs have duplicate parameter names, so the
# file does not run as-is. Comments below describe the apparent intent; the
# tokens themselves are left untouched.

__A = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example substituted into __call__'s docstring below.
__A = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'


def __A(_lowercase, _lowercase, _lowercase=8):
    # Round height/width up to the next multiple of scale_factor**2, then
    # rescale — keeps latent dimensions compatible with the movq decoder.
    _A = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    _A = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class SCREAMING_SNAKE_CASE(snake_case):
    """Kandinsky v2.2 decoder pipeline: denoise latents with a conditioned
    UNet under a DDPM scheduler, then decode them to images via movq."""

    def __init__(self: List[Any], __A: UNetaDConditionModel, __A: DDPMScheduler, __A: VQModel, ) -> List[Any]:
        super().__init__()
        self.register_modules(
            unet=__A,
            scheduler=__A,
            movq=__A,
        )
        # Spatial down-scale factor of the movq autoencoder.
        _A = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def __A(self: Union[str, Any], __A: Optional[Any], __A: int, __A: int, __A: Tuple, __A: List[str], __A: Optional[int]) -> Union[str, Any]:
        # Draw (or validate) the initial latent tensor, then scale it by the
        # scheduler's initial noise sigma.
        if latents is None:
            _A = randn_tensor(__A, generator=__A, device=__A, dtype=__A)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            _A = latents.to(__A)

        _A = latents * scheduler.init_noise_sigma
        return latents

    def __A(self: Optional[Any], __A: Any = 0) -> Dict:
        # Sequential CPU offload: each sub-model lives on CPU and is moved to
        # the GPU only while it executes (lowest memory, slowest).
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        _A = torch.device(f"""cuda:{gpu_id}""")

        _A = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__A, __A)

    def __A(self: List[Any], __A: Union[str, Any] = 0) -> List[Any]:
        # Model-level CPU offload via accelerate hooks (better speed/memory
        # trade-off than sequential offload; needs accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')

        _A = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=__A)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        _A = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _A, _A = cpu_offload_with_hook(__A, __A, prev_module_hook=__A)

        # We'll offload the last model manually.
        _A = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __A(self: Union[str, Any]) -> List[str]:
        # Device the pipeline actually executes on (respects accelerate hooks).
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__A, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__A)
    def __call__(self: Union[str, Any], __A: Union[torch.FloatTensor, List[torch.FloatTensor]], __A: Union[torch.FloatTensor, List[torch.FloatTensor]], __A: int = 5_12, __A: int = 5_12, __A: int = 1_00, __A: float = 4.0, __A: int = 1, __A: Optional[Union[torch.Generator, List[torch.Generator]]] = None, __A: Optional[torch.FloatTensor] = None, __A: Optional[str] = "pil", __A: bool = True, ) -> Any:
        _A = self._execution_device

        # classifier-free guidance is active when guidance_scale > 1
        _A = guidance_scale > 1.0

        # Accept lists of embeddings by concatenating along the batch dim.
        if isinstance(__A, __A):
            _A = torch.cat(__A, dim=0)
        _A = image_embeds.shape[0] * num_images_per_prompt

        if isinstance(__A, __A):
            _A = torch.cat(__A, dim=0)

        if do_classifier_free_guidance:
            # Duplicate per requested image and stack [negative, positive]
            # for a single batched UNet pass.
            _A = image_embeds.repeat_interleave(__A, dim=0)
            _A = negative_image_embeds.repeat_interleave(__A, dim=0)

            _A = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=__A)

        self.scheduler.set_timesteps(__A, device=__A)
        _A = self.scheduler.timesteps

        _A = self.unet.config.in_channels

        _A, _A = downscale_height_and_width(__A, __A, self.movq_scale_factor)

        # create initial latent
        _A = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, __A, __A, __A, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(__A)):
            # expand the latents if we are doing classifier free guidance
            _A = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            _A = {'''image_embeds''': image_embeds}
            _A = self.unet(
                sample=__A, timestep=__A, encoder_hidden_states=__A, added_cond_kwargs=__A, return_dict=__A,
            )[0]

            if do_classifier_free_guidance:
                # Split off the learned-variance channels, mix the guided
                # noise prediction, then re-attach the text-variance half.
                _A, _A = noise_pred.split(latents.shape[1], dim=1)
                _A, _A = noise_pred.chunk(2)
                _A, _A = variance_pred.chunk(2)
                _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                _A = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                _A, _A = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(
                __A, __A, __A, generator=__A,
            )[0]

        # post-processing: decode latents to pixel space via movq
        _A = self.movq.decode(__A, force_not_quantize=__A)['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            # Map [-1, 1] model output to [0, 1] channel-last numpy images.
            _A = image * 0.5 + 0.5
            _A = image.clamp(0, 1)
            _A = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                _A = self.numpy_to_pil(__A)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__A)
62
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)

# NOTE(review): identifiers in this test module were machine-mangled
# (__A / _A / _lowercase); several defs share one name and some have
# duplicate parameter names, so this module does not run as-is. Comments
# describe the apparent intent; tokens are left untouched.

__A = '\\n  Text data.\n  Second line of data.'

__A = 'file'


@pytest.fixture(scope='''session''')
def __A(_lowercase):
    # Writes the test content into a .zstd-compressed file and returns its path.
    _A = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    _A = bytes(_lowercase, '''utf-8''')
    with zstd.open(_lowercase, '''wb''') as f:
        f.write(_lowercase)
    return path


@pytest.fixture
def __A(_lowercase):
    # Writes the test content into the mock "tmp://" filesystem.
    with open(os.path.join(tmpfs.local_root_dir, _lowercase), '''w''') as f:
        f.write(_lowercase)
    return FILE_PATH


@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''])
def __A(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase):
    # cached_path should transparently extract each supported compression format.
    _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    _A = input_paths[compression_format]
    _A = tmp_path / '''cache'''
    _A = DownloadConfig(cache_dir=_lowercase, extract_compressed_file=_lowercase)
    _A = cached_path(_lowercase, download_config=_lowercase)
    with open(_lowercase) as f:
        _A = f.read()
    with open(_lowercase) as f:
        _A = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize('''default_extracted''', [True, False])
@pytest.mark.parametrize('''default_cache_dir''', [True, False])
def __A(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase):
    # The extraction target should honor the configured cache/extract dirs.
    _A = '''custom_cache'''
    _A = '''custom_extracted_dir'''
    _A = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', _lowercase)
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(_lowercase))
        _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    _A = xz_file
    _A = (
        DownloadConfig(extract_compressed_file=_lowercase)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=_lowercase)
    )
    _A = cached_path(_lowercase, download_config=_lowercase)
    assert Path(_lowercase).parent.parts[-2:] == expected


def __A(_lowercase):
    # Local files resolve to themselves, via absolute and relative paths.
    _A = str(Path(_lowercase).resolve())
    assert cached_path(_lowercase) == text_file
    # relative path
    _A = str(Path(_lowercase).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(_lowercase) == text_file


def __A(_lowercase):
    # Missing local files raise, for absolute and relative paths alike.
    _A = str(tmp_path.resolve() / '''__missing_file__.txt''')
    with pytest.raises(_lowercase):
        cached_path(_lowercase)
    # relative path
    _A = '''./__missing_file__.txt'''
    with pytest.raises(_lowercase):
        cached_path(_lowercase)


def __A(_lowercase):
    # get_from_cache reads content back through the tmp:// mock filesystem.
    _A = get_from_cache(f"""tmp://{tmpfs_file}""")
    with open(_lowercase) as f:
        _A = f.read()
    assert output_file_content == FILE_CONTENT


@patch('''datasets.config.HF_DATASETS_OFFLINE''', _lowercase)
def __A():
    # In offline mode, remote cached_path must raise OfflineModeIsEnabled.
    with pytest.raises(_lowercase):
        cached_path('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', _lowercase)
def __A(_lowercase):
    # Offline mode blocks the http helpers.
    _A = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(_lowercase):
        http_get('''https://huggingface.co''', temp_file=_lowercase)
    with pytest.raises(_lowercase):
        http_head('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', _lowercase)
def __A(_lowercase):
    # Offline mode blocks the ftp helpers.
    _A = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(_lowercase):
        ftp_get('''ftp://huggingface.co''', temp_file=_lowercase)
    with pytest.raises(_lowercase):
        ftp_head('''ftp://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', _lowercase)
def __A(_lowercase):
    # Offline mode blocks the fsspec helpers.
    _A = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(_lowercase):
        fsspec_get('''s3://huggingface.co''', temp_file=_lowercase)
    with pytest.raises(_lowercase):
        fsspec_head('''s3://huggingface.co''')
62
1
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam

# NOTE(review): identifiers in this test module were machine-mangled
# (SCREAMING_SNAKE_CASE / __A / _A); the three classes share one name and
# methods share names, so the module does not run as-is. Comments describe
# the apparent intent; tokens are left untouched.


class SCREAMING_SNAKE_CASE(datasets.BeamBasedBuilder):
    """Toy beam-based dataset with a flat string `content` feature."""

    def __A(self: Dict) -> int:
        # Dataset metadata: single string column named "content".
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''')}),
            supervised_keys=__A,
        )

    def __A(self: Optional[int], __A: Any, __A: List[str]) -> str:
        # One TRAIN split fed by the flat dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''examples''': get_test_dummy_examples()})]

    def __A(self: List[str], __A: str, __A: List[str]) -> Dict:
        # Beam pipeline that materializes the examples.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(__A)


class SCREAMING_SNAKE_CASE(datasets.BeamBasedBuilder):
    """Toy beam-based dataset with a nested sequence feature."""

    def __A(self: Optional[Any]) -> Optional[int]:
        # Nested feature: a sequence of {"b": string}.
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}),
            supervised_keys=__A,
        )

    def __A(self: int, __A: str, __A: str) -> Tuple:
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''examples''': get_test_nested_examples()})
        ]

    def __A(self: Optional[Any], __A: Dict, __A: int) -> Tuple:
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(__A)


def __A():
    # Three flat dummy examples keyed by index.
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


def __A():
    # Three nested dummy examples keyed by index.
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


class SCREAMING_SNAKE_CASE(snake_case):
    """End-to-end tests for beam-based dataset builders (DirectRunner)."""

    @require_beam
    def __A(self: Union[str, Any]) -> str:
        # download_and_prepare writes one arrow file + dataset_info.json and
        # the resulting dataset round-trips the flat dummy examples.
        _A = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _A = DummyBeamDataset(cache_dir=__A, beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(__A, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train.arrow""")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''')}))
            _A = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, __A)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, __A)
            self.assertDictEqual(dset['''train'''][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(__A, builder.name, '''default''', '''0.0.0''', '''dataset_info.json'''))
            )
            del dset

    @require_beam
    def __A(self: Tuple) -> Dict:
        # Sharded write: patching WriteToParquet(num_shards=2) must produce
        # two arrow shards while preserving the dataset contents.
        import apache_beam as beam

        _A = beam.io.parquetio.WriteToParquet

        _A = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _A = DummyBeamDataset(cache_dir=__A, beam_runner='''DirectRunner''')
            with patch('''apache_beam.io.parquetio.WriteToParquet''') as write_parquet_mock:
                _A = partial(__A, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __A, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train-00000-of-00002.arrow"""
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __A, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train-00000-of-00002.arrow"""
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''')}))
            _A = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, __A)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, __A)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content''']), sorted(['''foo''', '''bar''', '''foobar''']))
            self.assertTrue(
                os.path.exists(os.path.join(__A, builder.name, '''default''', '''0.0.0''', '''dataset_info.json'''))
            )
            del dset

    @require_beam
    def __A(self: List[str]) -> int:
        # Without a beam runner, preparation must fail with MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _A = DummyBeamDataset(cache_dir=__A)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def __A(self: List[str]) -> List[Any]:
        # Same round-trip test as the first, for the nested-feature dataset.
        _A = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _A = NestedBeamDataset(cache_dir=__A, beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(__A, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train.arrow""")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})})
            )
            _A = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, __A)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, __A)
            self.assertDictEqual(dset['''train'''][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(__A, builder.name, '''default''', '''0.0.0''', '''dataset_info.json'''))
            )
            del dset
62
import math def __A ( _lowercase ): '''simple docstring''' _A = [] _A = 2 _A = int(math.sqrt(_lowercase ) ) # Size of every segment _A = [True] * (end + 1) _A = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): _A = False start += 1 prime += in_prime _A = end + 1 _A = min(2 * end , _lowercase ) while low <= n: _A = [True] * (high - low + 1) for each in in_prime: _A = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): _A = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) _A = high + 1 _A = min(high + end , _lowercase ) return prime print(sieve(10**6))
62
1
import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: List[str] , __A: VQModel , __A: UNetaDModel , __A: DDIMScheduler ) -> Tuple: super().__init__() self.register_modules(vqvae=__A , unet=__A , scheduler=__A ) @torch.no_grad() def __call__( self: Optional[Any] , __A: int = 1 , __A: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __A: float = 0.0 , __A: int = 50 , __A: Optional[str] = "pil" , __A: bool = True , **__A: int , ) -> Union[Tuple, ImagePipelineOutput]: _A = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__A , ) _A = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _A = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__A ) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature _A = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _A = {} if accepts_eta: _A = eta for t in self.progress_bar(self.scheduler.timesteps ): _A = self.scheduler.scale_model_input(__A , __A ) # predict the noise residual _A = self.unet(__A , __A ).sample # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__A , __A , __A , **__A ).prev_sample # decode the image latents with the VAE _A = self.vqvae.decode(__A ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__A ) if not return_dict: return (image,) return ImagePipelineOutput(images=__A )
62
import flax.linen as nn
import jax
import jax.numpy as jnp


class SCREAMING_SNAKE_CASE(nn.Module ):
    """simple docstring"""
    # NOTE(review): obfuscated dump — dataclass field names and `setup` were
    # mangled. This module doubles H and W via nearest-neighbour resize, then
    # applies a 3x3 stride-1 conv with (1, 1) padding.

    A_ = 42           # out_channels (mangled field name)
    A_ = jnp.floataa  # compute dtype (mangled `jnp.float32`)

    def __A( self: Tuple ) -> Tuple:
        _A = nn.Conv(
            self.out_channels ,
            kernel_size=(3, 3) ,
            strides=(1, 1) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

    def __call__( self: Dict , __A: Dict ) -> Tuple:
        _A ,_A ,_A ,_A = hidden_states.shape
        # Nearest-neighbour upsample to twice the spatial size.
        _A = jax.image.resize(
            __A ,
            shape=(batch, height * 2, width * 2, channels) ,
            method='''nearest''' ,
        )
        _A = self.conv(__A )
        return hidden_states


class SCREAMING_SNAKE_CASE(nn.Module ):
    """simple docstring"""
    # Downsampling block: 3x3 conv with stride 2 halves H and W.

    A_ = 42
    A_ = jnp.floataa

    def __A( self: List[str] ) -> Tuple:
        _A = nn.Conv(
            self.out_channels ,
            kernel_size=(3, 3) ,
            strides=(2, 2) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

    def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        _A = self.conv(__A )
        return hidden_states


class SCREAMING_SNAKE_CASE(nn.Module ):
    """simple docstring"""
    # ResNet block: GroupNorm -> swish -> conv, add projected time embedding,
    # second GroupNorm -> swish -> dropout -> conv, plus an (optionally 1x1
    # projected) residual connection.

    A_ = 42    # in_channels
    A_ = None  # out_channels (defaults to in_channels when None)
    A_ = 0.0   # dropout_prob
    A_ = None  # use_nin_shortcut override (auto when None)
    A_ = jnp.floataa

    def __A( self: Dict ) -> Dict:
        _A = self.in_channels if self.out_channels is None else self.out_channels

        _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        _A = nn.Conv(
            __A ,
            kernel_size=(3, 3) ,
            strides=(1, 1) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

        _A = nn.Dense(__A , dtype=self.dtype )

        _A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        _A = nn.Dropout(self.dropout_prob )
        _A = nn.Conv(
            __A ,
            kernel_size=(3, 3) ,
            strides=(1, 1) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

        # A 1x1 conv shortcut is needed when the channel count changes.
        _A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        _A = None
        if use_nin_shortcut:
            _A = nn.Conv(
                __A ,
                kernel_size=(1, 1) ,
                strides=(1, 1) ,
                padding='''VALID''' ,
                dtype=self.dtype ,
            )

    def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
        _A = hidden_states
        _A = self.norma(__A )
        _A = nn.swish(__A )
        _A = self.conva(__A )

        # Project the time embedding and broadcast it over the spatial dims.
        _A = self.time_emb_proj(nn.swish(__A ) )
        _A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
        _A = hidden_states + temb

        _A = self.norma(__A )
        _A = nn.swish(__A )
        _A = self.dropout(__A , __A )
        _A = self.conva(__A )

        if self.conv_shortcut is not None:
            _A = self.conv_shortcut(__A )

        return hidden_states + residual
62
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A = logging.get_logger(__name__)

__A = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class SCREAMING_SNAKE_CASE(snake_case ):
    """simple docstring"""
    # NOTE(review): obfuscated dump of the UniSpeech configuration class
    # (a `PretrainedConfig` subclass). Parameter names were mangled to `__A`
    # and attribute-assignment targets to `_A`; the right-hand sides preserve
    # the real attribute names.

    A_ = "unispeech"

    def __init__(
        self: str ,
        __A: Any=32 ,
        __A: str=7_68 ,
        __A: str=12 ,
        __A: int=12 ,
        __A: Optional[Any]=30_72 ,
        __A: List[str]="gelu" ,
        __A: List[str]=0.1 ,
        __A: Dict=0.1 ,
        __A: int=0.1 ,
        __A: Any=0.0 ,
        __A: str=0.0 ,
        __A: str=0.1 ,
        __A: Any=0.1 ,
        __A: Optional[int]=0.02 ,
        __A: int=1e-5 ,
        __A: Dict="group" ,
        __A: Union[str, Any]="gelu" ,
        __A: str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) ,
        __A: List[str]=(5, 2, 2, 2, 2, 2, 2) ,
        __A: Any=(10, 3, 3, 3, 3, 2, 2) ,
        __A: List[str]=False ,
        __A: Dict=1_28 ,
        __A: Tuple=16 ,
        __A: int=False ,
        __A: List[str]=True ,
        __A: Any=0.05 ,
        __A: str=10 ,
        __A: Any=2 ,
        __A: str=0.0 ,
        __A: Tuple=10 ,
        __A: Any=0 ,
        __A: Optional[Any]=3_20 ,
        __A: str=2 ,
        __A: Optional[int]=0.1 ,
        __A: int=1_00 ,
        __A: List[str]=2_56 ,
        __A: int=2_56 ,
        __A: Optional[Any]=0.1 ,
        __A: List[str]="mean" ,
        __A: Optional[int]=False ,
        __A: Any=False ,
        __A: str=2_56 ,
        __A: List[str]=80 ,
        __A: List[str]=0 ,
        __A: Union[str, Any]=1 ,
        __A: int=2 ,
        __A: Union[str, Any]=0.5 ,
        **__A: Union[str, Any] ,
    ) -> List[str]:
        super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
        # Transformer encoder / feature-extractor dimensions.
        _A = hidden_size
        _A = feat_extract_norm
        _A = feat_extract_activation
        _A = list(__A )
        _A = list(__A )
        _A = list(__A )
        _A = conv_bias
        _A = num_conv_pos_embeddings
        _A = num_conv_pos_embedding_groups
        _A = len(self.conv_dim )
        _A = num_hidden_layers
        _A = intermediate_size
        _A = hidden_act
        _A = num_attention_heads
        _A = hidden_dropout
        _A = attention_dropout
        _A = activation_dropout
        _A = feat_proj_dropout
        _A = final_dropout
        _A = layerdrop
        _A = layer_norm_eps
        _A = initializer_range
        _A = num_ctc_classes
        _A = vocab_size
        _A = do_stable_layer_norm
        _A = use_weighted_layer_sum
        _A = classifier_proj_size

        # The three conv-layer specs must all describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _A = apply_spec_augment
        _A = mask_time_prob
        _A = mask_time_length
        _A = mask_time_min_masks
        _A = mask_feature_prob
        _A = mask_feature_length
        _A = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        _A = num_codevectors_per_group
        _A = num_codevector_groups
        _A = contrastive_logits_temperature
        _A = feat_quantizer_dropout
        _A = num_negatives
        _A = codevector_dim
        _A = proj_codevector_dim
        _A = diversity_loss_weight

        # ctc loss
        _A = ctc_loss_reduction
        _A = ctc_zero_infinity

        # pretraining loss
        _A = replace_prob

    @property
    def __A( self: List[str] ) -> int:
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul , self.conv_stride , 1 )
62
def __A ( _lowercase ): '''simple docstring''' _A = [0] * len(_lowercase ) _A = [] _A = [] _A = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_lowercase ) ): if indegree[i] == 0: queue.append(_lowercase ) while queue: _A = queue.pop(0 ) cnt += 1 topo.append(_lowercase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_lowercase ) if cnt != len(_lowercase ): print('''Cycle exists''' ) else: print(_lowercase ) # Adjacency List of Graph __A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
62
1
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


__A = logging.get_logger(__name__)

__A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

__A = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

__A = {
    'allenai/led-base-16384': 16384,
}


class SCREAMING_SNAKE_CASE(snake_case ):
    """simple docstring"""
    # NOTE(review): obfuscated dump of the fast (backend-`tokenizers`) LED
    # tokenizer; the class attributes below are the mangled references to the
    # VOCAB_FILES_NAMES / pretrained-map constants above.

    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = LEDTokenizer
    A_ = ["input_ids", "attention_mask"]

    def __init__(
        self: List[str] ,
        __A: List[str]=None ,
        __A: Dict=None ,
        __A: List[Any]=None ,
        __A: List[Any]="replace" ,
        __A: List[Any]="<s>" ,
        __A: Dict="</s>" ,
        __A: Any="</s>" ,
        __A: Optional[int]="<s>" ,
        __A: Dict="<unk>" ,
        __A: Tuple="<pad>" ,
        __A: str="<mask>" ,
        __A: Optional[int]=False ,
        __A: Dict=True ,
        **__A: Union[str, Any] ,
    ) -> List[str]:
        super().__init__(
            __A ,
            __A ,
            tokenizer_file=__A ,
            errors=__A ,
            bos_token=__A ,
            eos_token=__A ,
            sep_token=__A ,
            cls_token=__A ,
            unk_token=__A ,
            pad_token=__A ,
            mask_token=__A ,
            add_prefix_space=__A ,
            trim_offsets=__A ,
            **__A ,
        )

        # Rebuild the backend pre-tokenizer when the stored `add_prefix_space`
        # flag disagrees with the requested one.
        _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , __A ) != add_prefix_space:
            _A = getattr(__A , pre_tok_state.pop('''type''' ) )
            _A = add_prefix_space
            _A = pre_tok_class(**__A )

        _A = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        _A = '''post_processor'''
        _A = getattr(self.backend_tokenizer , __A , __A )
        if tokenizer_component_instance:
            _A = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _A = tuple(state['''sep'''] )
            if "cls" in state:
                _A = tuple(state['''cls'''] )

            _A = False
            if state.get('''add_prefix_space''' , __A ) != add_prefix_space:
                _A = add_prefix_space
                _A = True

            if state.get('''trim_offsets''' , __A ) != trim_offsets:
                _A = trim_offsets
                _A = True

            if changes_to_apply:
                _A = getattr(__A , state.pop('''type''' ) )
                _A = component_class(**__A )
                setattr(self.backend_tokenizer , __A , __A )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def __A( self: List[str] ) -> str:
        # mask_token getter: log an error (not raise) when it was never set.
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def __A( self: List[Any] , __A: Optional[int] ) -> str:
        # Wrap plain strings in AddedToken so the mask keeps its space-handling.
        _A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
        _A = value

    def __A( self: List[str] , *__A: str , **__A: str ) -> BatchEncoding:
        # Pretokenized input requires add_prefix_space=True; fail loudly otherwise.
        _A = kwargs.get('''is_split_into_words''' , __A )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.'''
            )

        return super()._batch_encode_plus(*__A , **__A )

    def __A( self: List[str] , *__A: int , **__A: int ) -> BatchEncoding:
        # Single-example counterpart of the guard above.
        _A = kwargs.get('''is_split_into_words''' , __A )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.'''
            )

        return super()._encode_plus(*__A , **__A )

    def __A( self: List[str] , __A: str , __A: Optional[str] = None ) -> Tuple[str]:
        # Persist the backend vocabulary files and return their paths.
        _A = self._tokenizer.model.save(__A , name=__A )
        return tuple(__A )

    def __A( self: Optional[int] , __A: int , __A: Union[str, Any]=None ) -> int:
        # <s> A </s> for single sequences; <s> A </s> </s> B </s> for pairs.
        _A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __A( self: int , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]:
        # LED (like BART/RoBERTa) does not use token type ids: all zeros.
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __A(
        self: List[str] ,
        __A: Union[Dict[str, EncodedInput], BatchEncoding] ,
        __A: Optional[int] = None ,
        __A: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,
        __A: Optional[int] = None ,
        __A: Optional[bool] = None ,
    ) -> dict:
        # Standard padding first, then pad `global_attention_mask` to match.
        _A = super()._pad(
            encoded_inputs=__A ,
            max_length=__A ,
            padding_strategy=__A ,
            pad_to_multiple_of=__A ,
            return_attention_mask=__A ,
        )

        # Load from model defaults
        if return_attention_mask is None:
            _A = '''attention_mask''' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            _A = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            _A = len(encoded_inputs['''global_attention_mask'''] ) != len(__A )

            if needs_to_be_padded:
                _A = len(__A ) - len(encoded_inputs['''global_attention_mask'''] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    _A = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    _A = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )

        return encoded_inputs
62
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class SCREAMING_SNAKE_CASE(snake_case , snake_case ):
    """simple docstring"""
    # NOTE(review): obfuscated dump of a fourth-order improved pseudo linear
    # multistep (IPNDM) scheduler — see https://arxiv.org/pdf/2202.09778.pdf,
    # formulas (9), (12), (13) and Algorithm 2.

    A_ = 1  # solver order exposed to the pipeline

    @register_to_config
    def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(__A )

        # standard deviation of the initial noise distribution
        _A = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        _A = 4

        # running values
        _A = []

    def __A( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
        # Build the (sine-squared) beta schedule, matching alphas, and the
        # timestep grid; also resets the multistep history.
        _A = num_inference_steps
        _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        _A = torch.cat([steps, torch.tensor([0.0] )] )

        if self.config.trained_betas is not None:
            _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
        else:
            _A = torch.sin(steps * math.pi / 2 ) ** 2

        _A = (1.0 - self.betas**2) ** 0.5

        _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
        _A = timesteps.to(__A )

        _A = []

    def __A(
        self: Tuple ,
        __A: torch.FloatTensor ,
        __A: int ,
        __A: torch.FloatTensor ,
        __A: bool = True ,
    ) -> Union[SchedulerOutput, Tuple]:
        # One multistep update x_t -> x_{t-1}. Keeps up to 4 past model
        # outputs (`ets`) and blends them with Adams–Bashforth coefficients.
        if self.num_inference_steps is None:
            raise ValueError(
                '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler'''
            )

        _A = (self.timesteps == timestep).nonzero().item()
        _A = timestep_index + 1

        _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(__A )

        # Fall back to lower-order formulas until enough history is collected.
        if len(self.ets ) == 1:
            _A = self.ets[-1]
        elif len(self.ets ) == 2:
            _A = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        _A = self._get_prev_sample(__A , __A , __A , __A )

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=__A )

    def __A( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
        # IPNDM needs no input scaling; return the sample unchanged.
        return sample

    def __A( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]:
        # Invert the forward process at `timestep_index`, then re-noise at the
        # previous index; max(..., 1e-8) guards division by a near-zero alpha.
        _A = self.alphas[timestep_index]
        _A = self.betas[timestep_index]

        _A = self.alphas[prev_timestep_index]
        _A = self.betas[prev_timestep_index]

        _A = (sample - sigma * ets) / max(__A , 1e-8 )
        _A = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__( self: List[str] ) -> Dict:
        return self.config.num_train_timesteps
62
1
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class SCREAMING_SNAKE_CASE:
    """simple docstring"""
    # NOTE(review): obfuscated dump — resizes each image so its shorter edge
    # falls in `short_edge_length`, capping the longer edge at `max_size`
    # (PIL BILINEAR for uint8 arrays, torch bilinear for tensors).

    def __init__( self: Union[str, Any] , __A: Dict , __A: Union[str, Any]=sys.maxsize ) -> Tuple:
        _A = '''bilinear'''
        _A = max_size
        _A = short_edge_length

    def __call__( self: Dict , __A: Optional[int] ) -> Any:
        _A = []
        for img in imgs:
            _A ,_A = img.shape[:2]
            # later: provide list and randomly choose index for resize
            _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            _A = size * 1.0 / min(__A , __A )
            if h < w:
                _A ,_A = size, scale * w
            else:
                _A ,_A = scale * h, size
            # Shrink further if the longer edge would exceed max_size.
            if max(__A , __A ) > self.max_size:
                _A = self.max_size * 1.0 / max(__A , __A )
                _A = newh * scale
                _A = neww * scale
            _A = int(neww + 0.5 )
            _A = int(newh + 0.5 )

            if img.dtype == np.uinta:
                _A = Image.fromarray(__A )
                _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                _A = np.asarray(__A )
            else:
                _A = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # 3, 0, 1)  # hw(c) -> nchw
                _A = nn.functional.interpolate(
                    __A , (newh, neww) , mode=self.interp_method , align_corners=__A
                ).squeeze(0 )
            img_augs.append(__A )
        return img_augs


class SCREAMING_SNAKE_CASE:
    """simple docstring"""
    # Faster-RCNN-style preprocessor: resize shortest edge, normalize with the
    # config's pixel mean/std, pad to a common batch size, return
    # (images, sizes, scales).

    def __init__( self: Tuple , __A: int ) -> List[str]:
        _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        _A = cfg.INPUT.FORMAT
        _A = cfg.SIZE_DIVISIBILITY
        _A = cfg.PAD_VALUE
        _A = cfg.INPUT.MAX_SIZE_TEST
        _A = cfg.MODEL.DEVICE
        _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        _A = lambda __A : (x - self.pixel_mean) / self.pixel_std

    def __A( self: List[Any] , __A: str ) -> Union[str, Any]:
        # Pad every image (bottom/right) up to the max H/W found in the batch.
        _A = tuple(max(__A ) for s in zip(*[img.shape for img in images] ) )
        _A = [im.shape[-2:] for im in images]
        _A = [
            nn.functional.pad(
                __A ,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,
                value=self.pad_value ,
            )
            for size, im in zip(__A , __A )
        ]

        return torch.stack(__A ), torch.tensor(__A )

    def __call__( self: int , __A: Dict , __A: List[str]=False ) -> List[str]:
        with torch.no_grad():
            if not isinstance(__A , __A ):
                _A = [images]
            if single_image:
                assert len(__A ) == 1
            # Coerce every entry to a float tensor on the target device.
            for i in range(len(__A ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(__A , images.pop(__A ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        __A ,
                        torch.as_tensor(img_tensorize(images.pop(__A ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() ,
                    )
            # resize smallest edge
            _A = torch.tensor([im.shape[:2] for im in images] )
            _A = self.aug(__A )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            _A = [self.normalizer(__A ) for x in images]
            # now pad them to do the following operations
            _A ,_A = self.pad(__A )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            _A = torch.true_divide(__A , __A )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def __A( _lowercase , _lowercase ):
    '''simple docstring'''
    # Rescale xyxy boxes in place by per-image (y, x) scale factors.
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def __A( _lowercase , _lowercase ):
    '''simple docstring'''
    # Clamp xyxy boxes in place to the image bounds; reject non-finite input.
    assert torch.isfinite(_lowercase ).all(), "Box tensor contains infinite or NaN!"
    _A ,_A = box_size
    tensor[:, 0].clamp_(min=0 , max=_lowercase )
    tensor[:, 1].clamp_(min=0 , max=_lowercase )
    tensor[:, 2].clamp_(min=0 , max=_lowercase )
    tensor[:, 3].clamp_(min=0 , max=_lowercase )
62
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
62
1
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__A = [
    'cross_validation.py',
    'gradient_accumulation.py',
    'local_sgd.py',
    'multi_process_metrics.py',
    'memory.py',
    'automatic_gradient_accumulation.py',
    'fsdp_with_peak_mem_tracking.py',
    'deepspeed_with_config_support.py',
    'megatron_lm_gpt_pretraining.py',
]


class SCREAMING_SNAKE_CASE(unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): obfuscated dump of accelerate's example-script tests.
    # This class diffs each `examples/by_feature` script against the matching
    # "complete" example to keep them in sync (expected diff is empty).

    def __A( self: Dict , __A: str , __A: bool , __A: str = None , __A: list = None ) -> List[str]:
        _A = None
        _A = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        _A = os.path.abspath('''examples''' )
        for item in os.listdir(__A ):
            if item not in EXCLUDE_EXAMPLES:
                _A = os.path.join(__A , __A )
                if os.path.isfile(__A ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=__A ,
                        feature_script=__A ,
                        tested_section='''main()''' if parser_only else '''training_function()''' ,
                    ):
                        _A = compare_against_test(
                            os.path.join(__A , __A ) , __A , __A , __A )
                        _A = '''\n'''.join(__A )
                        # Strip known-acceptable differences before comparing.
                        if special_strings is not None:
                            for string in special_strings:
                                _A = diff.replace(__A , '''''' )
                        self.assertEqual(__A , '''''' )

    def __A( self: Optional[int] ) -> Union[str, Any]:
        # Check both the parser and the training sections of the NLP example.
        self.one_complete_example('''complete_nlp_example.py''' , __A )
        self.one_complete_example('''complete_nlp_example.py''' , __A )

    def __A( self: Any ) -> Tuple:
        # CV example: tracking-specific lines are expected to differ.
        _A = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        _A = [
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''"epoch": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''' , __A , __A , __A )
        self.one_complete_example('''complete_cv_example.py''' , __A , __A , __A )


@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class SCREAMING_SNAKE_CASE(snake_case ):
    """simple docstring"""
    # Runs the feature example scripts end-to-end via `accelerate launch`
    # against a shared temporary config directory.

    A_ = False

    @classmethod
    def __A( cls: List[Any] ) -> List[str]:
        super().setUpClass()
        _A = tempfile.mkdtemp()
        _A = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        _A = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def __A( cls: str ) -> int:
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def __A( self: str ) -> Dict:
        # Checkpointing by epoch should create `epoch_0`.
        _A = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )

    def __A( self: Any ) -> Optional[Any]:
        # Checkpointing every step should create `step_2`.
        _A = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _A = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )

    def __A( self: Tuple ) -> Tuple:
        # Resuming from an epoch checkpoint should skip epoch 0.
        _A = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
        """.split()
        _A = run_command(self._launch_args + testargs , return_stdout=__A )
        self.assertNotIn('''epoch 0:''' , __A )
        self.assertIn('''epoch 1:''' , __A )

    def __A( self: Union[str, Any] ) -> Dict:
        # Resuming from a step checkpoint: expectation depends on how many
        # processes (GPUs) were used to write it.
        _A = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
        """.split()
        _A = run_command(self._launch_args + testargs , return_stdout=__A )
        if torch.cuda.is_available():
            _A = torch.cuda.device_count()
        else:
            _A = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , __A )
            self.assertIn('''epoch 1:''' , __A )
        else:
            self.assertIn('''epoch 0:''' , __A )
            self.assertIn('''epoch 1:''' , __A )

    @slow
    def __A( self: List[str] ) -> List[Any]:
        # Cross-validation example must reach a minimum accuracy on real data.
        _A = '''
        examples/by_feature/cross_validation.py
        --num_folds 2
        '''.split()
        with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            _A = run_command(self._launch_args + testargs , return_stdout=__A )
            _A = re.findall('''({.+})''' , __A )
            _A = [r for r in results if '''accuracy''' in r][-1]
            _A = ast.literal_eval(__A )
            self.assertGreaterEqual(results['''accuracy'''] , 0.75 )

    def __A( self: int ) -> Optional[Any]:
        _A = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def __A( self: str ) -> Tuple:
        # Tracking example should write its logs under <tmpdir>/tracking.
        with tempfile.TemporaryDirectory() as tmpdir:
            _A = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(__A , '''tracking''' ) ) )

    def __A( self: List[Any] ) -> Optional[int]:
        _A = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs )

    def __A( self: Dict ) -> List[Any]:
        _A = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs )
62
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __A = NewType('DataClass', Any) __A = NewType('DataClassType', Any) def __A ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __A ( _lowercase ): '''simple docstring''' _A = {str(_lowercase ): choice for choice in choices} return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase ) def __A ( *, _lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ): '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _A = {} if aliases is not None: _A = aliases if help is not None: _A = help return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" A_ = 42 def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str: # To make the default appear when using --help if "formatter_class" not in kwargs: _A = ArgumentDefaultsHelpFormatter super().__init__(**__A ) if dataclasses.is_dataclass(__A ): _A = [dataclass_types] _A = list(__A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__A ) @staticmethod def __A ( __A: 
ArgumentParser , __A: dataclasses.Field ) -> str: _A = f"""--{field.name}""" _A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , __A ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) _A = kwargs.pop('''aliases''' , [] ) if isinstance(__A , __A ): _A = [aliases] _A = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__A ) not in field.type.__args__: # filter `str` in Union _A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _A = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _A = ( field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1] ) _A = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _A = {} if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )): if origin_type is Literal: _A = field.type.__args__ else: _A = [x.value for x in field.type] _A = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: _A = field.default else: _A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to 
instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _A = copy(__A ) # Hack because type=bool in argparse does not behave as we want. _A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _A = default # This tells argparse we accept 0 or 1 value after --field_name _A = '''?''' # This is the value that will get picked if we do --field_name (without value) _A = True elif isclass(__A ) and issubclass(__A , __A ): _A = field.type.__args__[0] _A = '''+''' if field.default_factory is not dataclasses.MISSING: _A = field.default_factory() elif field.default is dataclasses.MISSING: _A = True else: _A = field.type if field.default is not dataclasses.MISSING: _A = field.default elif field.default_factory is not dataclasses.MISSING: _A = field.default_factory() else: _A = True parser.add_argument(__A , *__A , **__A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _A = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A ) def __A ( self: Dict , __A: DataClassType ) -> List[Any]: if hasattr(__A , '''_argument_group_name''' ): _A = self.add_argument_group(dtype._argument_group_name ) else: _A = self try: _A = get_type_hints(__A ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ): _A = '''.'''.join(map(__A , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__A ): if not field.init: continue _A = type_hints[field.name] self._parse_dataclass_field(__A , __A ) def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): _A = [] if args_filename: args_files.append(Path(__A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _A = ArgumentParser() args_file_parser.add_argument(__A , type=__A , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) _A ,_A = args_file_parser.parse_known_args(args=__A ) _A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A ) if cmd_args_file_paths: args_files.extend([Path(__A ) for p in cmd_args_file_paths] ) _A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _A = file_args + args if args is not None else file_args + sys.argv[1:] _A ,_A = self.parse_known_args(args=__A ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in vars(__A ).items() if k in keys} for k in keys: delattr(__A , __A ) _A = dtype(**__A ) outputs.append(__A ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]: _A = set(args.keys() ) _A = [] for dtype in self.dataclass_types: _A = {f.name for f in dataclasses.fields(__A ) if f.init} _A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) _A = dtype(**__A ) outputs.append(__A ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" ) return tuple(__A ) def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file: _A = json.loads(open_json_file.read() ) _A = self.parse_dict(__A , allow_extra_keys=__A ) return tuple(__A ) def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]: _A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A ) return tuple(__A )
62
1
import operator as op

# Explicit public API so ``from <module> import *`` exposes the evaluator even
# though its (machine-mangled) name starts with an underscore.
__all__ = ["__A"]


def __A(post_fix):
    """Evaluate a space-separated postfix (RPN) expression of non-negative integers.

    Prints a step-by-step trace table (symbol, action, stack contents) while
    evaluating, then returns the final result as an ``int``.

    Supported operators: ``^`` (power), ``*`` (multiply), ``/`` (truncating
    division), ``+`` (add), ``-`` (subtract).

    Args:
        post_fix: sequence of tokens, e.g. ``["5", "6", "9", "*", "+"]``.

    Returns:
        The integer value of the expression.

    Raises:
        IndexError: if the expression is malformed (too few operands).
        KeyError: if a token is neither a digit string nor a known operator.
    """

    def div(x, y):
        # Truncating (toward-zero) division, matching the original
        # ``int(x / y)`` behaviour rather than floor division.
        return int(x / y)

    stack = []
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # operand: push it (kept as a string on the stack)
            stack.append(x)
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            # Operator: pop the RIGHT operand first, then the LEFT operand.
            # (Previously both pops overwrote one temporary and the prints
            # referenced undefined names ``a``/``b`` -- fixed here.)
            b = stack.pop()
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # Evaluate the two popped values and push the result back.
            stack.append(str(opr[x](int(a), int(b))))
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    # Read the expression under a distinct name so the evaluator function is
    # not shadowed (the previous code rebound the function's own name with the
    # ``input()`` result and then called undefined ``solve``/``Postfix``).
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", __A(Postfix))
62
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def __A ( self: Dict ) -> Optional[int]: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __A ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self: Optional[Any] ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , 
decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self: Dict ) -> Tuple: _A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any: with torch.no_grad(): _A = MaskFormerModel(config=__A ) model.to(__A ) model.eval() _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A , output_hidden_states=__A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__A , __A ) def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int: _A = MaskFormerForInstanceSegmentation(config=__A ) model.to(__A ) model.eval() def comm_check_on_output(__A: int ): # let's still check that all the required stuff is there 
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=__A , pixel_mask=__A ) _A = model(__A ) comm_check_on_output(__A ) _A = model( pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A ) comm_check_on_output(__A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False def __A ( self: int ) -> Tuple: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: List[Any] ) -> Dict: self.config_tester.run_common_tests() def __A ( self: Optional[Any] ) -> int: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Dict ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) 
def __A ( self: int ) -> Tuple: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def __A ( self: List[Any] ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def __A ( self: Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def __A ( self: int ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __A ( self: Union[str, Any] ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: List[Any] ) -> Any: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) @slow def __A ( self: int ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( self: Optional[Any] ) -> Optional[int]: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=__A ), '''mask_labels''': torch.randn((2, 10, *size) , device=__A ), '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A ) _A = model(**__A ) self.assertTrue(outputs.loss is not None ) def __A ( self: Optional[Any] ) -> List[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A ) def __A ( self: Any ) -> Tuple: _A ,_A = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ).to(__A ) _A = model(**__A , output_attentions=__A ) self.assertTrue(outputs.attentions is not None ) def __A ( self: Dict ) -> Union[str, Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ).loss loss.backward() def __A ( self: Tuple ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(__A ) model.to(__A ) model.train() _A = model(__A , mask_labels=__A , class_labels=__A ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Union[str, Any] ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def __A ( 
self: List[Any] ) -> Any: _A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) _A = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) ) _A = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Dict ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, 
-1.9_269_832, -2.093_942], ] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: List[Any] ) -> Dict: _A = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(__A , return_tensors='''pt''' ).to(__A ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__A , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**__A ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] _A = torch.tensor(__A ).to(__A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) ) def __A ( self: Optional[Any] ) -> str: _A = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__A ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) _A = inputs['''pixel_values'''].to(__A ) _A = [el.to(__A ) for el in inputs['''mask_labels''']] _A = [el.to(__A ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**__A ) self.assertTrue(outputs.loss is not None )
62
1
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version __A = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize __A = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' __A = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' __A = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. 
Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def __A ( self: List[str] ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] , ) def __A ( self: int , __A: Optional[Any] ) -> Optional[Any]: import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def __A ( self: str , __A: Any , __A: Dict , __A: Optional[Any]=0.9 , __A: Dict=3 , __A: 
Union[str, Any]=0.5 ) -> Optional[Any]: if NLTK_VERSION >= version.Version('''3.6.5''' ): _A = [ meteor_score.single_meteor_score( word_tokenize(__A ) , word_tokenize(__A ) , alpha=__A , beta=__A , gamma=__A ) for ref, pred in zip(__A , __A ) ] else: _A = [ meteor_score.single_meteor_score(__A , __A , alpha=__A , beta=__A , gamma=__A ) for ref, pred in zip(__A , __A ) ] return {"meteor": np.mean(__A )}
62
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


__A = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE:
    """Pairs a question-encoder tokenizer with a generator tokenizer behind a
    single RAG-style interface.

    ``current_tokenizer`` selects which of the two ``__call__`` delegates to;
    decoding always goes through the generator tokenizer.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError), named every method ``__A`` (each shadowing the previous
    one) and discarded every assignment into ``_A``. Names below are
    reconstructed from how the bodies read them (``self.question_encoder``,
    ``model_inputs``, ...).
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Start in "input mode": questions are encoded by default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers into dedicated sub-folders of `save_directory`."""
        if os.path.isfile(save_directory):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from the sub-folders written by `save_pretrained`."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Delegates to whichever tokenizer the current mode selects.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated helper: tokenize sources (and optionally targets into `labels`)."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
62
1
# Tests for diffusers' TextToVideoSDPipeline: a fast dummy-component suite and
# slow CUDA-only integration checks against the damo-vilab checkpoint.
#
# NOTE(review): this file has been machine-obfuscated — every method is named
# `__A`, results are assigned to a throwaway `_A`, and later lines read names
# (`unet`, `components`, `sd_pipe`, ...) that are never bound. The comments
# below describe the intended behavior; the original identifiers must be
# restored before this file can actually run.
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
    """simple docstring"""

    # Pipeline under test plus the shared parameter sets consumed by the mixin.
    A_ = TextToVideoSDPipeline
    A_ = TEXT_TO_IMAGE_PARAMS
    A_ = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    A_ = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )

    def __A ( self: Optional[Any] ) -> List[Any]:
        # Build minimal components (tiny 3D UNet, DDIM scheduler, small VAE
        # and CLIP) so the pipeline is cheap enough to run on CPU.
        torch.manual_seed(0 )
        _A = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        _A = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , )
        torch.manual_seed(0 )
        _A = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        _A = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        _A = CLIPTextModel(__A )
        _A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _A = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def __A ( self: Union[str, Any] , __A: List[Any] , __A: Tuple=0 ) -> int:
        # Deterministic inputs; `mps` only supports the global torch seed,
        # other devices get a device-bound Generator.
        if str(__A ).startswith('''mps''' ):
            _A = torch.manual_seed(__A )
        else:
            _A = torch.Generator(device=__A ).manual_seed(__A )
        _A = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs

    def __A ( self: List[Any] ) -> Dict:
        # Smoke test: two inference steps on CPU, compare a 3x3 corner slice
        # of the first frame against golden values.
        _A = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        _A = self.get_dummy_components()
        _A = TextToVideoSDPipeline(**__A )
        _A = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )
        _A = self.get_dummy_inputs(__A )
        _A = '''np'''
        _A = sd_pipe(**__A ).frames
        _A = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        _A = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self: Dict ) -> str:
        # Attention-slicing output must match the unsliced forward pass.
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__A , expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def __A ( self: Union[str, Any] ) -> List[Any]:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A , expected_max_diff=1e-2 )

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def __A ( self: Any ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def __A ( self: Optional[Any] ) -> Optional[int]:
        pass

    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def __A ( self: Optional[int] ) -> List[str]:
        pass

    def __A ( self: List[Any] ) -> Dict:
        return super().test_progress_bar()


@slow
@skip_mps
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self: List[Any] ) -> str:
        # Full checkpoint, 25 steps with the DPM multistep scheduler; compare
        # mean absolute error against a reference video.
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        _A = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        _A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        _A = pipe.to('''cuda''' )
        _A = '''Spiderman is surfing'''
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = pipe(__A , generator=__A , num_inference_steps=25 , output_type='''pt''' ).frames
        _A = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __A ( self: List[Any] ) -> Union[str, Any]:
        # Two-step variant with the pipeline's default scheduler.
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        _A = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        _A = pipe.to('''cuda''' )
        _A = '''Spiderman is surfing'''
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = pipe(__A , generator=__A , num_inference_steps=2 , output_type='''pt''' ).frames
        _A = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
62
from __future__ import annotations def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): # noqa: E741 '''simple docstring''' while r - l > 1: _A = (l + r) // 2 if v[m] >= key: _A = m else: _A = m # noqa: E741 return r def __A ( _lowercase ): '''simple docstring''' if len(_lowercase ) == 0: return 0 _A = [0] * len(_lowercase ) _A = 1 _A = v[0] for i in range(1 , len(_lowercase ) ): if v[i] < tail[0]: _A = v[i] elif v[i] > tail[length - 1]: _A = v[i] length += 1 else: _A = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
62
1
class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Any , __A: List[Any] ) -> Any: _A = val _A = None _A = None def __A ( self: Optional[Any] , __A: List[Any] ) -> str: if self.val: if val < self.val: if self.left is None: _A = Node(__A ) else: self.left.insert(__A ) elif val > self.val: if self.right is None: _A = Node(__A ) else: self.right.insert(__A ) else: _A = val def __A ( _lowercase , _lowercase ): '''simple docstring''' if root: inorder(root.left , _lowercase ) res.append(root.val ) inorder(root.right , _lowercase ) def __A ( _lowercase ): '''simple docstring''' if len(_lowercase ) == 0: return arr _A = Node(arr[0] ) for i in range(1 , len(_lowercase ) ): root.insert(arr[i] ) # Traverse BST in order. _A = [] inorder(_lowercase , _lowercase ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
62
# PyTorch-Lightning GLUE sequence-classification fine-tuning example built on
# the local `lightning_base.BaseTransformer` helper.
#
# NOTE(review): this file has been machine-obfuscated — methods are all named
# `__A`, several defs repeat a parameter name (a SyntaxError), and results are
# assigned to a throwaway `_A` while later lines read the original variable
# names (`loss`, `preds`, `parser`, ...). Comments describe intended behavior.
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


__A = logging.getLogger(__name__)


class SCREAMING_SNAKE_CASE ( snake_case ):
    """simple docstring"""

    A_ = "sequence-classification"

    def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
        # Accept either a plain dict or an argparse.Namespace of hparams.
        if type(__A ) == dict:
            _A = Namespace(**__A )
        _A = glue_output_modes[hparams.task]
        _A = glue_tasks_num_labels[hparams.task]
        super().__init__(__A , __A , self.mode )

    def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
        # Forward pass delegates to the wrapped transformers model.
        return self.model(**__A )

    def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
        # Training step: assemble model inputs from the batch tensors and log
        # the loss together with the current learning rate.
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A )
        _A = outputs[0]
        _A = self.trainer.lr_schedulers[0]['''scheduler''']
        _A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def __A ( self: List[str] ) -> Dict:
        # prepare_data: featurize the train/dev splits once and cache the
        # tensors on disk (skipped when a cache exists and no overwrite).
        _A = self.hparams
        _A = processors[args.task]()
        _A = processor.get_labels()
        for mode in ["train", "dev"]:
            _A = self._feature_file(__A )
            if os.path.exists(__A ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , __A )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                _A = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                _A = convert_examples_to_features(
                    __A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , __A )
                torch.save(__A , __A )

    def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
        # Build a DataLoader over the cached feature tensors for `mode`;
        # "test" reuses the dev split's cache.
        _A = '''dev''' if mode == '''test''' else mode
        _A = self._feature_file(__A )
        logger.info('''Loading features from cached file %s''' , __A )
        _A = torch.load(__A )
        _A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        _A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        _A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            _A = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            _A = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )

    def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
        # Validation step: return loss, raw logits and gold labels so the
        # epoch-end hook can compute GLUE metrics.
        _A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        _A = self(**__A )
        _A ,_A = outputs[:2]
        _A = logits.detach().cpu().numpy()
        _A = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __A ( self: str , __A: Dict ) -> tuple:
        # _eval_end: aggregate per-step outputs into mean loss + GLUE metrics
        # plus per-example prediction/label lists.
        _A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        _A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            _A = np.argmax(__A , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            _A = np.squeeze(__A )
        _A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        _A = [[] for _ in range(out_label_ids.shape[0] )]
        _A = [[] for _ in range(out_label_ids.shape[0] )]
        _A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
        _A = dict(results.items() )
        _A = results
        return ret, preds_list, out_label_list

    def __A ( self: Any , __A: list ) -> dict:
        # validation_epoch_end: surface the aggregated metrics to Lightning.
        _A ,_A ,_A = self._eval_end(__A )
        _A = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __A ( self: int , __A: Union[str, Any] ) -> dict:
        # test_epoch_end: same aggregation, test-specific loss key.
        _A ,_A ,_A = self._eval_end(__A )
        _A = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]:
        # Extend the shared CLI parser with GLUE-specific options.
        BaseTransformer.add_model_specific_args(__A , __A )
        parser.add_argument(
            '''--max_seq_length''' , default=1_28 , type=__A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser


def __A ( ):
    '''simple docstring'''
    # CLI entry point: parse args, train, optionally evaluate the last
    # checkpoint on the dev set.
    _A = argparse.ArgumentParser()
    add_generic_args(_lowercase , os.getcwd() )
    _A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
    _A = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _A = os.path.join(
            '''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
        os.makedirs(args.output_dir )
    _A = GLUETransformer(_lowercase )
    _A = generic_train(_lowercase , _lowercase )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        _A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) )
        _A = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(_lowercase )


if __name__ == "__main__":
    main()
62
1
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self: List[Any] , __A: Dict[str, int] , __A: List[str] , __A: int = None , __A: int = None ) -> Any: super().__init__() _A = pad_token_id _A = max_length _A = vocab _A = merges _A = BytePairTokenizer(__A , __A , sequence_length=__A ) @classmethod def __A ( cls: int , __A: GPTaTokenizer , *__A: Any , **__A: Optional[int] ) -> int: _A = [''' '''.join(__A ) for m in tokenizer.bpe_ranks.keys()] _A = tokenizer.get_vocab() return cls(__A , __A , *__A , **__A ) @classmethod def __A ( cls: Optional[int] , __A: Union[str, os.PathLike] , *__A: str , **__A: Tuple ) -> Optional[Any]: _A = GPTaTokenizer.from_pretrained(__A , *__A , **__A ) return cls.from_tokenizer(__A , *__A , **__A ) @classmethod def __A ( cls: int , __A: Tuple ) -> Tuple: return cls(**__A ) def __A ( self: str ) -> List[Any]: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def __A ( self: Optional[int] , __A: Union[str, Any] , __A: int = None ) -> Dict: _A = self.tf_tokenizer(__A ) _A = tf.ones_like(__A ) if self.pad_token_id is not None: # pad the tokens up to max length _A = max_length if max_length is not None else self.max_length if max_length is not None: _A ,_A = pad_model_inputs( __A , max_seq_length=__A , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
62
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __A ( _lowercase = "" ): '''simple docstring''' _A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' _A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' ) _A = soup.find_all('''td''' , attrs='''titleColumn''' ) _A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_lowercase , _lowercase ) } def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ): '''simple docstring''' _A = get_imdb_top_aaa_movies() with open(_lowercase , '''w''' , newline='''''' ) as out_file: _A = csv.writer(_lowercase ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
62
1
# bitsandbytes 4/8-bit quantization utilities for `accelerate`: replace
# nn.Linear layers with bnb quantized layers, build a device map, and load /
# offload checkpoint weights.
#
# NOTE(review): this file has been machine-obfuscated — every function is
# named `__A` (each shadowing the previous), parameters collapse to repeated
# `_lowercase` (a SyntaxError in several defs), digits in identifiers became
# `a` (`load_in_abit` stands for both load_in_4bit and load_in_8bit,
# `floataa` for float32, `fpaa` for fp32/fp16), and assignments all target a
# throwaway `_A` while later lines read the original names. Comments below
# describe the intended behavior.
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,  # NOTE(review): duplicated import — was 4bit + 8bit checks before obfuscation
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


__A = logging.getLogger(__name__)


def __A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , ):
    '''simple docstring'''
    # load_and_quantize_model: either quantize an already-materialized model
    # in place, or (meta model) build quantized layers empty and stream the
    # checkpoint weights in with optional CPU/disk offload.
    _A = bnb_quantization_config.load_in_abit
    _A = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    _A = []
    # custom device map
    if isinstance(_lowercase , _lowercase ) and len(device_map.keys() ) > 1:
        _A = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        _A = get_keys_to_not_convert(_lowercase )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_lowercase )
    _A = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        _A = []
    _A = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_lowercase )
    # compatibility with peft
    _A = load_in_abit
    _A = load_in_abit
    _A = get_parameter_device(_lowercase )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        _A = replace_with_bnb_layers(_lowercase , _lowercase , modules_to_not_convert=_lowercase )
        # convert param to the right dtype
        _A = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    # Strip the suffix and retry the cast on the owning module's param.
                    _A = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    _A = getattr(_lowercase , _lowercase , _lowercase )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(_lowercase ):
                param.to(_lowercase )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            '''We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        # Meta model: build quantized layers empty, then stream weights in.
        with init_empty_weights():
            _A = replace_with_bnb_layers(
                _lowercase , _lowercase , modules_to_not_convert=_lowercase )
        _A = get_quantized_model_device_map(
            _lowercase , _lowercase , _lowercase , max_memory=_lowercase , no_split_module_classes=_lowercase , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            _A = True
        _A = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            _lowercase , _lowercase , _lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowercase , offload_state_dict=_lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(_lowercase , device_map=_lowercase , offload_dir=_lowercase )


def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
    '''simple docstring'''
    # get_quantized_model_device_map: resolve a device map (default, named
    # strategy, or user dict) and reject quantized modules landing on cpu/disk.
    if device_map is None:
        if torch.cuda.is_available():
            _A = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(_lowercase , _lowercase ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        # Skipped / fp32-kept modules must not be planned at the quantized dtype.
        _A = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        _A = {}
        _A = special_dtypes
        _A = no_split_module_classes
        _A = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            _A = get_balanced_memory(
                _lowercase , low_zero=(device_map == '''balanced_low_0''') , max_memory=_lowercase , **_lowercase , )
        _A = max_memory
        _A = infer_auto_device_map(_lowercase , **_lowercase )
    if isinstance(_lowercase , _lowercase ):
        # check if don't have any quantized module on the cpu
        _A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        _A = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map


def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None ):
    '''simple docstring'''
    # replace_with_bnb_layers: public wrapper over the recursive replacement;
    # warns when no nn.Linear was found at all (e.g. gpt2's Conv1D layers).
    if modules_to_not_convert is None:
        _A = []
    _A ,_A = _replace_with_bnb_layers(
        _lowercase , _lowercase , _lowercase , _lowercase )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model


def __A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , ):
    '''simple docstring'''
    # _replace_with_bnb_layers: depth-first swap of eligible nn.Linear modules
    # for bnb quantized linears; returns (model, has_been_replaced).
    _A = False
    for name, module in model.named_children():
        if current_key_name is None:
            _A = []
        current_key_name.append(_lowercase )
        if isinstance(_lowercase , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            _A = '''.'''.join(_lowercase )
            _A = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    _A = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    _A = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    _A = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                _A = module.weight.data
                if module.bias is not None:
                    _A = module.bias.data
                bnb_module.requires_grad_(_lowercase )
                setattr(_lowercase , _lowercase , _lowercase )
                _A = True
        if len(list(module.children() ) ) > 0:
            # Recurse into container modules.
            _A ,_A = _replace_with_bnb_layers(
                _lowercase , _lowercase , _lowercase , _lowercase )
            _A = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced


def __A ( _lowercase ):
    '''simple docstring'''
    # get_keys_to_not_convert: find modules (tied weights, output head) that
    # should stay unquantized for numerical stability.
    with init_empty_weights():
        _A = deepcopy(_lowercase )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    _A = find_tied_parameters(_lowercase )
    # For compatibility with Accelerate < 0.18
    if isinstance(_lowercase , _lowercase ):
        _A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        _A = sum(_lowercase , [] )
    _A = len(_lowercase ) > 0
    # Check if it is a base model
    _A = False
    if hasattr(_lowercase , '''base_model_prefix''' ):
        _A = not hasattr(_lowercase , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    _A = list(model.named_children() )
    _A = [list_modules[-1][0]]
    # add last module together with tied weights
    _A = set(_lowercase ) - set(_lowercase )
    _A = list(set(_lowercase ) ) + list(_lowercase )
    # remove ".weight" from the keys
    _A = ['''.weight''', '''.bias''']
    _A = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                _A = name.replace(_lowercase , '''''' )
        filtered_module_names.append(_lowercase )
    return filtered_module_names


def __A ( _lowercase ):
    '''simple docstring'''
    # True when any module in the model is already a bnb quantized linear.
    for m in model.modules():
        if isinstance(_lowercase , bnb.nn.Linearabit ):
            return True
    return False


def __A ( _lowercase ):
    '''simple docstring'''
    # Device of the model's first parameter (proxy for the whole model).
    return next(parameter.parameters() ).device


def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    # quantize_and_offload: materialize the param on device 0 to trigger bnb
    # quantization (when no fp16 statistics are given), write the quantized
    # weight (and its SCB scale, if any) to the offload folder, then park the
    # module tensor back on `meta`.
    if fpaa_statistics is None:
        set_module_tensor_to_device(_lowercase , _lowercase , 0 , dtype=_lowercase , value=_lowercase )
        _A = param_name
        _A = model
        if "." in tensor_name:
            # Walk down to the module that owns the final tensor name.
            _A = tensor_name.split('''.''' )
            for split in splits[:-1]:
                _A = getattr(_lowercase , _lowercase )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                _A = new_module
            _A = splits[-1]
        # offload weights
        _A = False
        offload_weight(module._parameters[tensor_name] , _lowercase , _lowercase , index=_lowercase )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , _lowercase , index=_lowercase , )
    else:
        offload_weight(_lowercase , _lowercase , _lowercase , index=_lowercase )
        offload_weight(_lowercase , param_name.replace('''weight''' , '''SCB''' ) , _lowercase , index=_lowercase )
    set_module_tensor_to_device(_lowercase , _lowercase , '''meta''' , dtype=_lowercase , value=torch.empty(*param.size() ) )
62
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = BlenderbotSmallTokenizer A_ = False def __A ( self: List[str] ) -> int: super().setUp() _A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] _A = dict(zip(__A , range(len(__A ) ) ) ) _A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] _A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__A ) ) def __A ( self: str , **__A: Optional[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: str , __A: List[str] ) -> int: _A = '''adapt act apte''' _A = '''adapt act apte''' return input_text, output_text def __A ( self: Union[str, Any] ) -> Any: _A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A = '''adapt act apte''' _A = ['''adapt''', '''act''', '''ap@@''', '''te'''] _A = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) _A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] _A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __A ( self: Any ) -> List[str]: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert 
tok('''sam''' ).input_ids == [13_84] _A = '''I am a small frog.''' _A = tok([src_text] , padding=__A , truncation=__A )['''input_ids'''] _A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def __A ( self: Any ) -> int: _A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) _A = '''I am a small frog .''' _A = '''.''' _A = tok(__A )['''input_ids'''] _A = tok(__A )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
62
1
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    # Stand-in so `Image.open(...)` exists (as a no-op) when vision deps are absent.
    # NOTE(review): the mangled source named both this stub and the test class
    # identically, so the stub was shadowed; `Image` matches the call sites below.
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    """Return the MD5 hex digest of the image's raw pixel bytes (cheap output fingerprint).

    BUG FIX: original called nonexistent ``hashlib.mda``, bound the result to a
    throwaway name, then returned ``m.hexdigest()`` with ``m`` undefined.
    The name ``hashimage`` is required by the call site in test_large_model_pt.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    # Presumably the pipeline-mixin hook attribute — TODO confirm attribute name.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Parameter order follows the transformers pipeline-test convention
        # (model, tokenizer, processor); the original signature was mangled into
        # duplicate argument names (a SyntaxError) — TODO confirm order.
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
62
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL.
# BUG FIX: both the logger and this map were bound to the same mangled name, so
# the logger was immediately shadowed.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration for a RoBERTa model.

    BUG FIX: the mangled source declared every __init__ parameter as ``__A``
    (duplicate argument names — a SyntaxError) and inherited from the undefined
    name ``snake_case``; parameter names are restored from the attribute
    assignments in the body, and the base from the top-of-file import.
    """

    # Presumably the PretrainedConfig model-type key — TODO confirm attribute name.
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
62
1
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): __A = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: __A = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def __A ( _lowercase ): '''simple docstring''' _A = (images / 2 + 0.5).clamp(0 , 1 ) _A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() _A = numpy_to_pil(_lowercase ) return images def __A ( _lowercase ): '''simple docstring''' if images.ndim == 3: _A = images[None, ...] _A = (images * 2_55).round().astype('''uint8''' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images _A = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images] else: _A = [Image.fromarray(_lowercase ) for image in images] return pil_images
62
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __A = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE ( snake_case ): """simple docstring""" def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]: super().__init__(*__A , **__A ) _A = eval_examples _A = post_process_function _A = quant_trainer_args _A = 1_28 # default number of calibration samples def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) _A = calib_dataset if calib_dataset is not None else self.calib_dataset _A = self._remove_unused_columns(__A , description='''Calibration''' ) return DataLoader( __A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , ) def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]: _A = self.train_dataset if calib_dataset is None else calib_dataset _A = self.get_calib_dataloader(__A ) _A = self.model quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A ) model.eval() quant_trainer.enable_calibration(__A ) logger.info('''***** Running calibration *****''' ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__A ): # Prediction step _A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break 
quant_trainer.finish_calibration(__A , self.quant_trainer_args ) _A = model def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int: _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(__A ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _A = self.post_process_function(__A , __A , output.predictions ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) self.log(__A ) else: _A = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A ) return metrics def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]: _A = self.get_test_dataloader(__A ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A = eval_loop( __A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , ) finally: _A = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(__A , __A , output.predictions , '''predict''' ) _A = self.compute_metrics(__A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): _A = metrics.pop(__A ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A ) def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]: _A = self.eval_dataset _A = self.get_eval_dataloader(__A ) _A = next(iter(__A ) ) # saving device - to make it consistent _A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple _A = tuple(v.to(__A ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer _A = True _A = self.model.to(__A ) model.eval() model.float() _A = model.module if hasattr(__A , '''module''' ) else model quant_trainer.configure_model(__A , self.quant_trainer_args ) _A = os.path.join(__A , '''model.onnx''' ) logger.info(f"""exporting model to {output_model_file}""" ) _A = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( __A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=__A , ) logger.info('''onnx export 
finished''' )
62
1
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A ,_A = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
62
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
1
import json
import os

import torch

from diffusers import UNetaDModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    """Convert a local diffuser temporal-UNet checkpoint (horizon 32 or 128) to diffusers format.

    BUG FIX: the `__main__` guard calls ``unet(...)`` but the def was mangled to
    an anonymous name; the weight-remap loop also discarded the popped tensors
    instead of storing them under the new key.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        # Robustness: the original fell through to a NameError for other horizons.
        raise ValueError(f"unsupported horizon {hor}; expected 32 or 128")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Keys are assumed to line up positionally between the two models — TODO confirm.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """Convert the local value-function checkpoint to diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # Here the checkpoint itself is the state dict — TODO confirm.
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, split doubled letters with 'X', pad to even length.

    BUG FIX: the final parity check padded based on the *input* length instead of
    the cleaned string, which could produce odd-length (unencodable) output.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key square (as a flat list of 25 letters, J omitted)."""
    # I and J share a cell, so the alphabet omits J.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # A list keeps the row/column arithmetic below simple.
    table = []
    # Key letters first, skipping duplicates and non-alphabet characters.
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # Then the remaining alphabet in order.
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt `plaintext` with the Playfair cipher under `key`.

    BUG FIX: helper call sites (`generate_table`, `prepare_input`, `chunker`)
    referenced names whose defs had been mangled away, and the digraph loop bound
    both letters (and both rows/columns) to a single variable, collapsing every
    pair onto one cell and destroying the cipher.
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # Same row: take the letter to the right (wrapping).
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # Same column: take the letter below (wrapping).
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair `ciphertext` under `key` (inverse of `encode`)."""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # Same row: take the letter to the left (wrapping).
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # Same column: take the letter above (wrapping).
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
62
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL.
# BUG FIX: the logger and this map were bound to the same mangled name, so the
# logger was immediately shadowed.
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    """Configuration for an XLM-RoBERTa model.

    BUG FIX: the mangled source declared every __init__ parameter as ``__A``
    (duplicate argument names — a SyntaxError) and inherited from the undefined
    name ``snake_case``; parameter names are restored from the attribute
    assignments in the body, and the base from the top-of-file import.
    """

    # Presumably the PretrainedConfig model-type key — TODO confirm attribute name.
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
62
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_token_type_ids _A = use_input_mask _A = use_labels _A = use_mc_token_ids _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = self.vocab_size - 1 def __A ( self: Optional[int] ) -> Union[str, Any]: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None if self.use_mc_token_ids: _A = 
ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self: Optional[int] ) -> List[Any]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]: _A = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) _A = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str: _A = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self: Optional[int] ) -> Dict: _A = self.prepare_config_and_inputs() ( ( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) ,( _A ) , ) = config_and_inputs _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __A ( 
self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any: _A = self.num_labels _A = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () A_ = (CTRLLMHeadModel,) if is_torch_available() else () A_ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __A ( self: Any ) -> Union[str, Any]: _A = CTRLModelTester(self ) _A = ConfigTester(self , config_class=__A , n_embd=37 ) def __A ( self: Optional[int] ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self: Dict ) -> Any: self.config_tester.run_common_tests() def __A ( self: str ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def __A ( self: List[str] ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self: Optional[Any] ) -> int: pass @slow def __A ( self: Tuple ) -> Dict: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __A ( self: Any ) -> Union[str, Any]: pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: int ) -> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self: Any ) -> Any: _A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(__A ) _A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is _A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a _A = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
62
1
import argparse
from argparse import Namespace

import torch
from torch import nn


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (mutates `state_dict`)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # Default of None: not every checkpoint contains every key.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weight tensor is shared with `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint from `checkpoint_path` and return an
    equivalent ``XGLMForCausalLM`` with converted weights.
    """
    # Imported lazily so the torch-only helpers above remain usable without
    # `transformers` installed.
    from transformers import XGLMConfig, XGLMForCausalLM

    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes decoder weights with `decoder.`; the HF model uses `model.`.
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the ignored/renamed keys above would otherwise fail loading;
    # report (don't raise on) any mismatch.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
62
# Kosaraju's algorithm for strongly connected components of a directed graph,
# given as {vertex: [out-neighbours]} with vertices 0..n-1.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Return the DFS post-order (finish order) of all vertices reachable from
    `vert` in `graph` that are not yet marked in `visited` (mutated in place).
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    # Append `vert` after all its descendants: post-order / finish time.
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """Collect (via DFS on the reversed graph) all unvisited vertices reachable
    from `vert`; they form one strongly connected component.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """Return the list of strongly connected components of `graph`
    (Kosaraju: DFS finish order on the graph, then DFS on the reversed graph
    in decreasing finish order).
    """
    visited = len(graph) * [False]

    # Build the reversed graph: edge u -> v becomes v -> u.
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # First pass: record DFS finish order over all vertices.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # Second pass: sweep vertices in decreasing finish time on the reversed graph.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
62
1